From 09c916c6272abde6ada166a8838d450038ec1284 Mon Sep 17 00:00:00 2001 From: Cheng JIANG Date: Mon, 18 Oct 2021 14:37:22 +0800 Subject: [PATCH 001/162] AssetId trait should also contain TypeInfo bound (#10038) --- frame/support/src/traits/tokens/misc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 214c28708a196..100138171abe7 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -161,8 +161,8 @@ impl WithdrawReasons { } /// Simple amalgamation trait to collect together properties for an AssetId under one roof. -pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {} -impl AssetId for T {} +pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo {} +impl AssetId for T {} /// Simple amalgamation trait to collect together properties for a Balance under one roof. pub trait Balance: From 129c16b294f9bb904ce04da78ada98bf168421be Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 18 Oct 2021 09:12:22 +0200 Subject: [PATCH 002/162] CI: remove node-template from build-linux-substrate-simnet job (#10034) * ci: remove node-template from build-linux-substrate-simnet * build-linux-substrate job impovements * small fix --- .gitlab-ci.yml | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d36fe2b57add7..6d4362ea93629 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -178,6 +178,21 @@ default: | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json' - sccache -s +.build-linux-substrate-script: &build-linux-substrate-script + - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose + - mv ./target/release/substrate ./artifacts/substrate/. + - echo -n "Substrate version = " + - if [ "${CI_COMMIT_TAG}" ]; then + echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; + else + ./artifacts/substrate/substrate --version | + sed -n -E 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' | + tee ./artifacts/substrate/VERSION; + fi + - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 + - cp -r .maintain/docker/substrate.Dockerfile ./artifacts/substrate/ + - sccache -s + #### Vault secrets .vault-secrets: &vault-secrets secrets: @@ -571,7 +586,8 @@ check-dependent-cumulus: variables: DEPENDENT_REPO: cumulus -build-linux-substrate: &build-binary + +build-linux-substrate: stage: build <<: *collect-artifacts <<: *docker-env @@ -582,27 +598,21 @@ build-linux-substrate: &build-binary before_script: - mkdir -p ./artifacts/substrate/ script: - - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose - - mv ./target/release/substrate ./artifacts/substrate/. 
- - echo -n "Substrate version = " - - if [ "${CI_COMMIT_TAG}" ]; then - echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; - else - ./artifacts/substrate/substrate --version | - sed -n -E 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' | - tee ./artifacts/substrate/VERSION; - fi - - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 + - *build-linux-substrate-script - printf '\n# building node-template\n\n' - ./.maintain/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz - - cp -r .maintain/docker/substrate.Dockerfile ./artifacts/substrate/ - - sccache -s + #Build binary for simnet quick tests. build-linux-substrate-simnet: - <<: *build-binary + stage: build <<: *collect-artifacts-short + <<: *docker-env <<: *test-refs-no-trigger-prs-only + before_script: + - mkdir -p ./artifacts/substrate/ + script: + - *build-linux-substrate-script build-linux-subkey: &build-subkey stage: build From 4a99c091a97bf829e17a53e1a22a87ddc7b4df22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 18 Oct 2021 10:18:13 +0200 Subject: [PATCH 003/162] Make duration calculation robust against clock drift (#10042) It is possible that `Instant::now()` is returning an earlier clock time when being called a second time. To guard against this, we should use `saturating_duration_since`. --- client/db/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 549ef4012a739..3b8936c0f7bac 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1076,13 +1076,14 @@ impl FrozenForDuration { F: FnOnce() -> T, { let mut lock = self.value.lock(); - if lock.at.elapsed() > self.duration || lock.value.is_none() { + let now = std::time::Instant::now(); + if now.saturating_duration_since(lock.at) > self.duration || lock.value.is_none() { let new_value = f(); - lock.at = std::time::Instant::now(); + lock.at = now; lock.value = Some(new_value.clone()); new_value } else { - lock.value.as_ref().expect("checked with lock above").clone() + lock.value.as_ref().expect("Checked with in branch above; qed").clone() } } } From abeea6d04d72eddb136ec494298035977c9cdbe4 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 18 Oct 2021 11:19:35 +0200 Subject: [PATCH 004/162] Update lowest unbaked storage. 
(#9750)

* update lowest unbaked

* fix format

* add note

* fmt
---
 frame/democracy/src/lib.rs                | 32 ++++++++++++-----
 frame/democracy/src/tests.rs              |  2 +-
 frame/democracy/src/tests/cancellation.rs |  4 +++
 frame/democracy/src/tests/scheduling.rs   | 44 +++++++++++++++++++++++
 4 files changed, 72 insertions(+), 10 deletions(-)

diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs
index 50b245006fa24..893e4676bef7b 100644
--- a/frame/democracy/src/lib.rs
+++ b/frame/democracy/src/lib.rs
@@ -613,10 +613,7 @@ pub mod pallet {
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
 		/// Weight: see `begin_block`
 		fn on_initialize(n: T::BlockNumber) -> Weight {
-			Self::begin_block(n).unwrap_or_else(|e| {
-				sp_runtime::print(e);
-				0
-			})
+			Self::begin_block(n)
 		}
 	}
 
@@ -1682,7 +1679,7 @@ impl<T: Config> Pallet<T> {
 		now: T::BlockNumber,
 		index: ReferendumIndex,
 		status: ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>,
-	) -> Result<bool, DispatchError> {
+	) -> bool {
 		let total_issuance = T::Currency::total_issuance();
 		let approved = status.threshold.approved(status.tally, total_issuance);
 
@@ -1719,7 +1716,7 @@ impl<T: Config> Pallet<T> {
 			Self::deposit_event(Event::<T>::NotPassed(index));
 		}
 
-		Ok(approved)
+		approved
 	}
 
 	/// Current era is ending; we should finish up any proposals.
@@ -1734,7 +1731,7 @@ impl<T: Config> Pallet<T> {
 	/// - Db writes: `PublicProps`, `account`, `ReferendumCount`, `DepositOf`, `ReferendumInfoOf`
 	/// - Db reads per R: `DepositOf`, `ReferendumInfoOf`
 	/// #
-	fn begin_block(now: T::BlockNumber) -> Result<Weight, DispatchError> {
+	fn begin_block(now: T::BlockNumber) -> Weight {
 		let max_block_weight = T::BlockWeights::get().max_block;
 		let mut weight = 0;
 
@@ -1758,12 +1755,29 @@ impl<T: Config> Pallet<T> {
 
 		// tally up votes for any expiring referenda.
 		for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() {
-			let approved = Self::bake_referendum(now, index, info)?;
+			let approved = Self::bake_referendum(now, index, info);
 			ReferendumInfoOf::<T>::insert(index, ReferendumInfo::Finished { end: now, approved });
 			weight = max_block_weight;
 		}
 
-		Ok(weight)
+		// Notes:
+		// * We don't consider the lowest unbaked to be the last maturing in case some referenda
+		//   have a longer voting period than others.
+		// * The iteration here shouldn't trigger any storage reads that are not in cache, due to
+		//   `maturing_referenda_at_inner` having already read them.
+		// * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the
+		//   number of unbaked referenda is bounded by this number. In case those numbers have
+		//   changed in a runtime upgrade the formula should be adjusted but the bound should
+		//   still be sensible.
+		<LowestUnbaked<T>>::mutate(|ref_index| {
+			while *ref_index < last &&
+				Self::referendum_info(*ref_index)
+					.map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. }))
+			{
+				*ref_index += 1
+			}
+		});
+
+		weight
 	}
 
 	/// Reads the length of account in DepositOf without getting the complete value in the runtime.
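
(For readers skimming the hunk above, the advancement rule can be restated in isolation. This is an illustrative sketch only — `is_finished` stands in for the `Self::referendum_info(..)` lookup matching `ReferendumInfo::Finished`; it is not part of the pallet.)

```rust
/// Advance `lowest` past every referendum that is already finished, so that it
/// always points at the first referendum which may still need baking.
fn advance_lowest_unbaked(lowest: &mut u32, last: u32, is_finished: impl Fn(u32) -> bool) {
	while *lowest < last && is_finished(*lowest) {
		*lowest += 1;
	}
}

#[test]
fn advances_past_finished_referenda() {
	// Referenda 0 and 1 are finished, 2 is still live: the index stops at 2.
	let finished = [true, true, false];
	let mut lowest = 0u32;
	advance_lowest_unbaked(&mut lowest, 3, |i| finished[i as usize]);
	assert_eq!(lowest, 2);
}
```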
diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs
index 75104db51b971..f56667e9094b3 100644
--- a/frame/democracy/src/tests.rs
+++ b/frame/democracy/src/tests.rs
@@ -264,7 +264,7 @@ fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchRes
 fn next_block() {
 	System::set_block_number(System::block_number() + 1);
 	Scheduler::on_initialize(System::block_number());
-	assert!(Democracy::begin_block(System::block_number()).is_ok());
+	Democracy::begin_block(System::block_number());
 }
 
 fn fast_forward_to(n: u64) {
diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs
index c2bd725ce934a..83822bf51829f 100644
--- a/frame/democracy/src/tests/cancellation.rs
+++ b/frame/democracy/src/tests/cancellation.rs
@@ -30,10 +30,14 @@ fn cancel_referendum_should_work() {
 		);
 		assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1)));
 		assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into()));
+		assert_eq!(Democracy::lowest_unbaked(), 0);
 
 		next_block();
+
 		next_block();
 
+		assert_eq!(Democracy::lowest_unbaked(), 1);
+		assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count());
 		assert_eq!(Balances::free_balance(42), 0);
 	});
 }
diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs
index 06b492bc6093c..5c857a632b97b 100644
--- a/frame/democracy/src/tests/scheduling.rs
+++ b/frame/democracy/src/tests/scheduling.rs
@@ -30,8 +30,10 @@ fn simple_passing_should_work() {
 		);
 		assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1)));
 		assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 });
+		assert_eq!(Democracy::lowest_unbaked(), 0);
 		next_block();
 		next_block();
+		assert_eq!(Democracy::lowest_unbaked(), 1);
 		assert_eq!(Balances::free_balance(42), 2);
 	});
 }
@@ -110,3 +112,45 @@ fn delayed_enactment_should_work() {
 		assert_eq!(Balances::free_balance(42), 2);
 	});
 }
+
+#[test]
+fn lowest_unbaked_should_be_sensible() {
+	new_test_ext().execute_with(|| {
+		let r1 = Democracy::inject_referendum(
+			3,
+			set_balance_proposal_hash_and_note(1),
+			VoteThreshold::SuperMajorityApprove,
+			0,
+		);
+		let r2 = Democracy::inject_referendum(
+			2,
+			set_balance_proposal_hash_and_note(2),
+			VoteThreshold::SuperMajorityApprove,
+			0,
+		);
+		let r3 = Democracy::inject_referendum(
+			10,
+			set_balance_proposal_hash_and_note(3),
+			VoteThreshold::SuperMajorityApprove,
+			0,
+		);
+		assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1)));
+		assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1)));
+		// r3 is canceled
+		assert_ok!(Democracy::cancel_referendum(Origin::root(), r3.into()));
+		assert_eq!(Democracy::lowest_unbaked(), 0);
+
+		next_block();
+
+		// r2 is approved
+		assert_eq!(Balances::free_balance(42), 2);
+		assert_eq!(Democracy::lowest_unbaked(), 0);
+
+		next_block();
+
+		// r1 is approved
+		assert_eq!(Balances::free_balance(42), 1);
+		assert_eq!(Democracy::lowest_unbaked(), 3);
+		assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count());
+	});
+}

From d1ec40b49847fdb6dae8f00ab7ab08cef7dd10fc Mon Sep 17 00:00:00 2001
From: Dan Shields <35669742+NukeManDan@users.noreply.github.com>
Date: Mon, 18 Oct 2021 04:42:56 -0600
Subject: [PATCH 005/162] Clarify weight traits needed to impl in example (#9842)

* Clarify weight traits needed to impl in example

* Update frame/example/src/lib.rs

* Update frame/example/src/lib.rs

Co-authored-by: Guillaume Thiolliere

* fmt

Co-authored-by: Squirrel
Co-authored-by: Guillaume Thiolliere
---
 frame/example/src/lib.rs | 11 ++++++-----
 1 file changed, 6
insertions(+), 5 deletions(-)

diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs
index f0b58e9aec153..981274b1ba739 100644
--- a/frame/example/src/lib.rs
+++ b/frame/example/src/lib.rs
@@ -486,11 +486,12 @@ pub mod pallet {
 		// the chain in a moderate rate.
 		//
 		// The parenthesized value of the `#[pallet::weight(..)]` attribute can be any type that
-		// implements a set of traits, namely [`WeighData`] and [`ClassifyDispatch`].
-		// The former conveys the weight (a numeric representation of pure execution time and
-		// difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the
-		// call. A higher weight means a larger transaction (less of which can be placed in a
-		// single block).
+		// implements a set of traits, namely [`WeighData`], [`ClassifyDispatch`], and
+		// [`PaysFee`]. The first conveys the weight (a numeric representation of pure
+		// execution time and difficulty) of the transaction and the second demonstrates the
+		// [`DispatchClass`] of the call, and the third indicates whether the extrinsic must pay fees or not.
+		// A higher weight means a larger transaction (less of which can be placed in a single
+		// block).
 		//
 		// The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the
 		// benchmark toolchain.

From 781454d981a144d31f6cd1628cccd233b34d468a Mon Sep 17 00:00:00 2001
From: Pierre Krieger
Date: Tue, 19 Oct 2021 13:56:09 +0200
Subject: [PATCH 006/162] Don't print "Discovered new external" line for
 private IPs (#10055)

---
 client/network/src/discovery.rs | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs
index 1ed08cd671d4e..2a4b25a621e04 100644
--- a/client/network/src/discovery.rs
+++ b/client/network/src/discovery.rs
@@ -599,14 +599,16 @@ impl NetworkBehaviour for DiscoveryBehaviour {
 	fn inject_new_external_addr(&mut self, addr: &Multiaddr) {
 		let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into()));
 
-		// NOTE: we might re-discover the same address multiple times
-		// in which case we just want to refrain from logging.
-		if self.known_external_addresses.insert(new_addr.clone()) {
-			info!(
-				target: "sub-libp2p",
-				"🔍 Discovered new external address for our node: {}",
-				new_addr,
-			);
+		if self.can_add_to_dht(addr) {
+			// NOTE: we might re-discover the same address multiple times
+			// in which case we just want to refrain from logging.
+ if self.known_external_addresses.insert(new_addr.clone()) { + info!( + target: "sub-libp2p", + "🔍 Discovered new external address for our node: {}", + new_addr, + ); + } } for k in self.kademlias.values_mut() { From 29938120830e13a7a9f0f4daa71ca96e03c3002a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Paulo=20Silva=20de=20Souza?= <77391175+joao-paulo-parity@users.noreply.github.com> Date: Tue, 19 Oct 2021 14:05:05 -0300 Subject: [PATCH 007/162] remove hardcoded pipeline scripts tag (#10061) the tag will be moved to Gitlab CI/CD variables --- .gitlab-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6d4362ea93629..75fdf024d5bdf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -46,7 +46,6 @@ variables: &default-vars VAULT_AUTH_PATH: "gitlab-parity-io-jwt" VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" SIMNET_FEATURES_PATH: "simnet_tests/tests" - PIPELINE_SCRIPTS_TAG: "v0.1" default: cache: {} From 72a8f9fc6e25a82636b7d4b35ce98f79ec29ec38 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Tue, 19 Oct 2021 19:52:50 +0200 Subject: [PATCH 008/162] Slightly improved documentation. (#9976) Co-authored-by: Pierre Krieger Co-authored-by: Giles Cope --- client/network/src/service.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 9b6e54f37a663..90e647505fa1f 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1097,6 +1097,15 @@ impl NetworkService { /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also /// consist of only `/p2p/`. /// + /// The node will start establishing/accepting connections and substreams to/from peers in this + /// set, if it doesn't have any substream open with them yet. + /// + /// Note however, if a call to this function results in less peers on the reserved set, they + /// will not necessarily get disconnected (depending on available free slots in the peer set). + /// If you want to also disconnect those removed peers, you will have to call + /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit + /// this step if the peer set is in reserved only mode. + /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). pub fn set_reserved_peers( From dcad42f9b51130621e24c3a93ca1075cd1bb9d15 Mon Sep 17 00:00:00 2001 From: Jay Pavlina Date: Tue, 19 Oct 2021 17:30:46 -0500 Subject: [PATCH 009/162] Derive Encode/Decode for BlockId (#10063) --- primitives/runtime/src/generic/block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index 21a01933bc691..68959cc514d94 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -35,7 +35,7 @@ use sp_core::RuntimeDebug; use sp_std::prelude::*; /// Something to identify a block. 
-#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] From 11754462c426c782bd7eb0416b4eddee5c475439 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Thu, 21 Oct 2021 10:29:10 +0200 Subject: [PATCH 010/162] Allow pallet's info to be enumerated (#10053) * Allow pallet's info to be enumerated * Fixes * Formatting * Flat tuple for getting all pallet instances * Renaming and fixing reversedness * Formatting * Fixes * Back to nesting * Back to nestingx * Revert executive lib * Reversions * Reversions * Fixes * Fixes * Formatting * Fixes * Spelling * Comments --- frame/executive/README.md | 17 +++- .../procedural/src/construct_runtime/mod.rs | 1 + .../src/pallet/expand/pallet_struct.rs | 19 ++++ frame/support/src/dispatch.rs | 16 ++++ frame/support/src/lib.rs | 4 +- frame/support/src/migrations.rs | 4 +- frame/support/src/traits.rs | 3 +- frame/support/src/traits/metadata.rs | 88 +++++++++++++++++++ frame/support/test/tests/pallet_instance.rs | 42 +++++++++ 9 files changed, 187 insertions(+), 7 deletions(-) diff --git a/frame/executive/README.md b/frame/executive/README.md index ae3bbf1a9d994..e96d07b0843f2 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -35,7 +35,13 @@ The default Substrate node template declares the [`Executive`](https://docs.rs/f ```rust # /// Executive: handles dispatch to the various modules. -pub type Executive = executive::Executive; +pub type Executive = executive::Executive< + Runtime, + Block, + Context, + Runtime, + AllPallets, +>; ``` ### Custom `OnRuntimeUpgrade` logic @@ -54,7 +60,14 @@ impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { } } -pub type Executive = executive::Executive; +pub type Executive = executive::Executive< + Runtime, + Block, + Context, + Runtime, + AllPallets, + CustomOnRuntimeUpgrade, +>; ``` License: Apache-2.0 diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 04bb2ead645d2..863df34266591 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -217,6 +217,7 @@ fn decl_all_pallets<'a>( quote!( #types + /// All pallets included in the runtime as a nested tuple of types. /// Excludes the System pallet. 
pub type AllPallets = ( #all_pallets ); diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 57e814b6b8438..96dfdbb4b6f2d 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -233,6 +233,25 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> #frame_support::traits::PalletsInfoAccess + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn count() -> usize { 1 } + fn accumulate( + acc: &mut #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> + ) { + use #frame_support::traits::PalletInfoAccess; + let item = #frame_support::traits::PalletInfoData { + index: Self::index(), + name: Self::name(), + module_name: Self::module_name(), + crate_version: Self::crate_version(), + }; + acc.push(item); + } + } + #storage_info ) } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 6dc7fb8a94cae..a492bc12f6a38 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2165,6 +2165,22 @@ macro_rules! decl_module { } } + impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::PalletsInfoAccess + for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* + { + fn count() -> usize { 1 } + fn accumulate(acc: &mut $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData>) { + use $crate::traits::PalletInfoAccess; + let item = $crate::traits::PalletInfoData { + index: Self::index(), + name: Self::name(), + module_name: Self::module_name(), + crate_version: Self::crate_version(), + }; + acc.push(item); + } + } + // Implement GetCallName for the Call. impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::GetCallName for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index f3b00c764bb35..1b93b5fb5975e 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1476,11 +1476,11 @@ pub mod pallet_prelude { /// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. /// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. /// -/// It declare `type Module` type alias for `Pallet`, used by [`construct_runtime`]. +/// It declares `type Module` type alias for `Pallet`, used by [`construct_runtime`]. /// /// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet /// informations given by [`frame_support::traits::PalletInfo`]. -/// (The implementation use the associated type `frame_system::Config::PalletInfo`). +/// (The implementation uses the associated type `frame_system::Config::PalletInfo`). /// /// It implements [`traits::StorageInfoTrait`] on `Pallet` which give information about all /// storages. diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index dc3402440fdd4..c61cbac62a16b 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -58,9 +58,9 @@ impl PalletVersionToStorageVersionHelper for T { /// /// This will remove all `PalletVersion's` from the state and insert the current storage version. 
pub fn migrate_from_pallet_version_to_storage_version<
-	AllPallets: PalletVersionToStorageVersionHelper,
+	Pallets: PalletVersionToStorageVersionHelper,
 >(
 	db_weight: &RuntimeDbWeight,
 ) -> Weight {
-	AllPallets::migrate(db_weight)
+	Pallets::migrate(db_weight)
 }
diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs
index 5ac0208dc2033..513267c5c8ba6 100644
--- a/frame/support/src/traits.rs
+++ b/frame/support/src/traits.rs
@@ -63,7 +63,8 @@ pub use randomness::Randomness;
 mod metadata;
 pub use metadata::{
 	CallMetadata, CrateVersion, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo,
-	PalletInfoAccess, StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+	PalletInfoAccess, PalletInfoData, PalletsInfoAccess, StorageVersion,
+	STORAGE_VERSION_STORAGE_KEY_POSTFIX,
 };
 
 mod hooks;
diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs
index e60cf8be8a41c..0da76f7585aca 100644
--- a/frame/support/src/traits/metadata.rs
+++ b/frame/support/src/traits/metadata.rs
@@ -19,6 +19,7 @@
 
 use codec::{Decode, Encode};
 use sp_runtime::RuntimeDebug;
+use sp_std::prelude::*;
 
 /// Provides information about the pallet itself and its setup in the runtime.
 ///
@@ -35,6 +36,19 @@ pub trait PalletInfo {
 	fn crate_version<P: 'static>() -> Option<CrateVersion>;
 }
 
+/// Information regarding an instance of a pallet.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug)]
+pub struct PalletInfoData {
+	/// Index of the pallet as configured in the runtime.
+	pub index: usize,
+	/// Name of the pallet as configured in the runtime.
+	pub name: &'static str,
+	/// Name of the Rust module containing the pallet.
+	pub module_name: &'static str,
+	/// Version of the crate containing the pallet.
+	pub crate_version: CrateVersion,
+}
+
 /// Provides information about the pallet itself and its setup in the runtime.
 ///
 /// Declare some information and access the information provided by [`PalletInfo`] for a specific
@@ -50,6 +64,49 @@ pub trait PalletInfoAccess {
 	fn crate_version() -> CrateVersion;
 }
 
+/// Provide information about a bunch of pallets.
+pub trait PalletsInfoAccess {
+	/// The number of pallets' information that this type represents.
+	///
+	/// You probably don't want this function but `infos()` instead.
+	fn count() -> usize {
+		0
+	}
+
+	/// Extend the given vector by all of the pallets' information that this type represents.
+	///
+	/// You probably don't want this function but `infos()` instead.
+	fn accumulate(_accumulator: &mut Vec<PalletInfoData>) {}
+
+	/// All of the pallets' information that this type represents.
+	fn infos() -> Vec<PalletInfoData> {
+		let mut result = Vec::with_capacity(Self::count());
+		Self::accumulate(&mut result);
+		result
+	}
+}
+
+impl PalletsInfoAccess for () {}
+impl<T: PalletsInfoAccess> PalletsInfoAccess for (T,) {
+	fn count() -> usize {
+		T::count()
+	}
+	fn accumulate(acc: &mut Vec<PalletInfoData>) {
+		T::accumulate(acc)
+	}
+}
+
+impl<T1: PalletsInfoAccess, T2: PalletsInfoAccess> PalletsInfoAccess for (T1, T2) {
+	fn count() -> usize {
+		T1::count() + T2::count()
+	}
+	fn accumulate(acc: &mut Vec<PalletInfoData>) {
+		// The AllPallets type tuplises the pallets in reverse order, so we unreverse them here.
+		T2::accumulate(acc);
+		T1::accumulate(acc);
+	}
+}
+
 /// The function and pallet name of the Call.
#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] pub struct CallMetadata { @@ -206,6 +263,37 @@ pub trait GetStorageVersion { mod tests { use super::*; + struct Pallet1; + impl PalletInfoAccess for Pallet1 { + fn index() -> usize { + 1 + } + fn name() -> &'static str { + "Pallet1" + } + fn module_name() -> &'static str { + "pallet1" + } + fn crate_version() -> CrateVersion { + CrateVersion::new(1, 0, 0) + } + } + struct Pallet2; + impl PalletInfoAccess for Pallet2 { + fn index() -> usize { + 2 + } + fn name() -> &'static str { + "Pallet2" + } + fn module_name() -> &'static str { + "pallet2" + } + fn crate_version() -> CrateVersion { + CrateVersion::new(1, 0, 0) + } + } + #[test] fn check_storage_version_ordering() { let version = StorageVersion::new(1); diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 34586e8414216..3a1009402d6f2 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -505,6 +505,48 @@ fn storage_expand() { }); } +#[test] +fn pallet_metadata_expands() { + use frame_support::traits::{CrateVersion, PalletInfoData, PalletsInfoAccess}; + let mut infos = AllPalletsWithSystem::infos(); + infos.sort_by_key(|x| x.index); + assert_eq!( + infos, + vec![ + PalletInfoData { + index: 0, + name: "System", + module_name: "frame_system", + crate_version: CrateVersion { major: 4, minor: 0, patch: 0 }, + }, + PalletInfoData { + index: 1, + name: "Example", + module_name: "pallet", + crate_version: CrateVersion { major: 3, minor: 0, patch: 0 }, + }, + PalletInfoData { + index: 2, + name: "Instance1Example", + module_name: "pallet", + crate_version: CrateVersion { major: 3, minor: 0, patch: 0 }, + }, + PalletInfoData { + index: 3, + name: "Example2", + module_name: "pallet2", + crate_version: CrateVersion { major: 3, minor: 0, patch: 0 }, + }, + PalletInfoData { + index: 4, + name: "Instance1Example2", + module_name: "pallet2", + crate_version: CrateVersion { major: 3, minor: 0, patch: 0 }, + }, + ] + ); +} + #[test] fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { From 05af3c2c06caa85485e0c8eaa1c829da1a379c84 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Thu, 21 Oct 2021 15:44:46 +0300 Subject: [PATCH 011/162] Temporarily disable `node-bench-regression-guard` (#10075) --- .gitlab-ci.yml | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 75fdf024d5bdf..f49211c238c0c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -364,28 +364,29 @@ cargo-check-benches: script: - *cargo-check-benches-script -node-bench-regression-guard: - # it's not belong to `build` semantically, but dag jobs can't depend on each other - # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 - # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 - stage: build - <<: *docker-env - <<: *test-refs-no-trigger-prs-only - needs: - # this is a DAG - - job: cargo-check-benches - artifacts: true - # this does not like a DAG, just polls the artifact - - project: $CI_PROJECT_PATH - job: cargo-check-benches - ref: master - artifacts: true - variables: - CI_IMAGE: "paritytech/node-bench-regression-guard:latest" - before_script: [""] - script: - - 'node-bench-regression-guard --reference artifacts/benches/master-* - --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' +# TODO: re-enable after dedicated bench 
hosts provisioning +# node-bench-regression-guard: +# # it's not belong to `build` semantically, but dag jobs can't depend on each other +# # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 +# # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 +# stage: build +# <<: *docker-env +# <<: *test-refs-no-trigger-prs-only +# needs: +# # this is a DAG +# - job: cargo-check-benches +# artifacts: true +# # this does not like a DAG, just polls the artifact +# - project: $CI_PROJECT_PATH +# job: cargo-check-benches +# ref: master +# artifacts: true +# variables: +# CI_IMAGE: "paritytech/node-bench-regression-guard:latest" +# before_script: [""] +# script: +# - 'node-bench-regression-guard --reference artifacts/benches/master-* +# --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' cargo-check-subkey: stage: test From 485e592ee2ef4db499134e9a903c02574d731593 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Paulo=20Silva=20de=20Souza?= <77391175+joao-paulo-parity@users.noreply.github.com> Date: Thu, 21 Oct 2021 10:03:50 -0300 Subject: [PATCH 012/162] remove logging from the check-dependent-* job (#10076) such command should belong to the script rather than the job --- .gitlab-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f49211c238c0c..6ad2ff1a46d8f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -570,7 +570,6 @@ cargo-check-macos: --substrate "$DEPENDENT_REPO" "$GITHUB_PR_TOKEN" - - cd "$DEPENDENT_REPO" && git rev-parse --abbrev-ref HEAD # Individual jobs are set up for each dependent project so that they can be ran in parallel. # Arguably we could generate a job for each companion in the PR's description using Gitlab's From 632b32300eb9376767c2ae7b38e79b3f7f5329b1 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 22 Oct 2021 01:54:15 +0900 Subject: [PATCH 013/162] Speed up logging once again (#9981) * Update `tracing`-related dependencies * Enable `parking_lot` feature in `tracing-subscriber` * Add an asynchronous stderr logger * Make clippy happy * Add an integration test for the logger * Refactor `test_logger_filters`'s subprocess machinery into a separate function * Use a child process instead of hooking into stderr for the test * Add a doc comment for `MakeStderrWriter` * Move the initialization into the `MakeStderrWriter`'s constructor * Add an extra test case to trigger the logger's emergency flush mechanism * Use the buffer's mutex for asynchronous flushes * Remove vestigial `nix` dependency from one of the previous commits --- Cargo.lock | 10 +- client/tracing/Cargo.toml | 3 +- client/tracing/src/logging/directives.rs | 2 +- client/tracing/src/logging/mod.rs | 147 +++++++++++-- client/tracing/src/logging/stderr_writer.rs | 228 ++++++++++++++++++++ primitives/tracing/Cargo.toml | 4 +- 6 files changed, 373 insertions(+), 21 deletions(-) create mode 100644 client/tracing/src/logging/stderr_writer.rs diff --git a/Cargo.lock b/Cargo.lock index f15e363bfd6ae..6325304bfceec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4085,9 +4085,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.6.1" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" dependencies = [ "autocfg 1.0.1", ] @@ -8466,6 +8466,7 @@ dependencies = [ "chrono", "criterion", "lazy_static", + "libc", "log 
0.4.14", "once_cell", "parking_lot 0.11.1", @@ -10635,14 +10636,15 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.19" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab69019741fca4d98be3c62d2b75254528b5432233fd8a4d2739fec20278de48" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term 0.12.1", "chrono", "lazy_static", "matchers", + "parking_lot 0.11.1", "regex", "serde", "serde_json", diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index b4049fa097ff8..4939e6a73110c 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -17,6 +17,7 @@ ansi_term = "0.12.1" atty = "0.2.13" chrono = "0.4.19" lazy_static = "1.4.0" +libc = "0.2.95" log = { version = "0.4.8" } once_cell = "1.8.0" parking_lot = "0.11.1" @@ -26,7 +27,7 @@ serde = "1.0.126" thiserror = "1.0.21" tracing = "0.1.29" tracing-log = "0.1.2" -tracing-subscriber = "0.2.19" +tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } sp-rpc = { version = "4.0.0-dev", path = "../../primitives/rpc" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/tracing/src/logging/directives.rs b/client/tracing/src/logging/directives.rs index 16f68654de1eb..fe7d6a780dbf0 100644 --- a/client/tracing/src/logging/directives.rs +++ b/client/tracing/src/logging/directives.rs @@ -109,5 +109,5 @@ pub(crate) fn set_reload_handle(handle: Handle) { type SCSubscriber< N = tracing_fmt::format::DefaultFields, E = crate::logging::EventFormat, - W = fn() -> std::io::Stderr, + W = crate::logging::DefaultLogger, > = layer::Layered, Registry>; diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index c6a4f070176e8..7f995615a223b 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -26,6 +26,9 @@ mod directives; mod event_format; mod fast_local_time; mod layers; +mod stderr_writer; + +pub(crate) type DefaultLogger = stderr_writer::MakeStderrWriter; pub use directives::*; pub use sc_tracing_proc_macro::*; @@ -47,6 +50,8 @@ pub use event_format::*; pub use fast_local_time::FastLocalTime; pub use layers::*; +use stderr_writer::MakeStderrWriter; + /// Logging Result typedef. 
pub type Result = std::result::Result; @@ -91,7 +96,7 @@ fn prepare_subscriber( profiling_targets: Option<&str>, force_colors: Option, builder_hook: impl Fn( - SubscriberBuilder std::io::Stderr>, + SubscriberBuilder, ) -> SubscriberBuilder, ) -> Result LookupSpan<'a>> where @@ -172,7 +177,7 @@ where let builder = builder.with_span_events(format::FmtSpan::NONE); - let builder = builder.with_writer(std::io::stderr as _); + let builder = builder.with_writer(MakeStderrWriter::default()); let builder = builder.event_format(event_format); @@ -282,7 +287,16 @@ impl LoggerBuilder { mod tests { use super::*; use crate as sc_tracing; - use std::{env, process::Command}; + use log::info; + use std::{ + collections::BTreeMap, + env, + process::Command, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, + }; use tracing::{metadata::Kind, subscriber::Interest, Callsite, Level, Metadata}; const EXPECTED_LOG_MESSAGE: &'static str = "yeah logging works as expected"; @@ -292,9 +306,28 @@ mod tests { let _ = LoggerBuilder::new(directives).init().unwrap(); } + fn run_test_in_another_process( + test_name: &str, + test_body: impl FnOnce(), + ) -> Option { + if env::var("RUN_FORKED_TEST").is_ok() { + test_body(); + None + } else { + let output = Command::new(env::current_exe().unwrap()) + .arg(test_name) + .env("RUN_FORKED_TEST", "1") + .output() + .unwrap(); + + assert!(output.status.success()); + Some(output) + } + } + #[test] fn test_logger_filters() { - if env::var("RUN_TEST_LOGGER_FILTERS").is_ok() { + run_test_in_another_process("test_logger_filters", || { let test_directives = "afg=debug,sync=trace,client=warn,telemetry,something-with-dash=error"; init_logger(&test_directives); @@ -331,15 +364,7 @@ mod tests { assert!(test_filter("telemetry", Level::TRACE)); assert!(test_filter("something-with-dash", Level::ERROR)); }); - } else { - let status = Command::new(env::current_exe().unwrap()) - .arg("test_logger_filters") - .env("RUN_TEST_LOGGER_FILTERS", "1") - .output() - .unwrap() - .status; - assert!(status.success()); - } + }); } /// This test ensures that using dash (`-`) in the target name in logs and directives actually @@ -474,4 +499,100 @@ mod tests { assert_eq!("MAX_LOG_LEVEL=Trace", run_test(None, Some("test=info".into()))); } } + + // This creates a bunch of threads and makes sure they start executing + // a given callback almost exactly at the same time. 
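+	// Note: this uses a hand-rolled spin barrier (an `AtomicBool` polled with
+	// `yield_now`) so that, once the flag flips, all threads begin the callback
+	// as close to simultaneously as possible.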
+ fn run_on_many_threads(thread_count: usize, callback: impl Fn(usize) + 'static + Send + Clone) { + let started_count = Arc::new(AtomicUsize::new(0)); + let barrier = Arc::new(AtomicBool::new(false)); + let threads: Vec<_> = (0..thread_count) + .map(|nth_thread| { + let started_count = started_count.clone(); + let barrier = barrier.clone(); + let callback = callback.clone(); + + std::thread::spawn(move || { + started_count.fetch_add(1, Ordering::SeqCst); + while !barrier.load(Ordering::SeqCst) { + std::thread::yield_now(); + } + + callback(nth_thread); + }) + }) + .collect(); + + while started_count.load(Ordering::SeqCst) != thread_count { + std::thread::yield_now(); + } + barrier.store(true, Ordering::SeqCst); + + for thread in threads { + if let Err(error) = thread.join() { + println!("error: failed to join thread: {:?}", error); + unsafe { libc::abort() } + } + } + } + + #[test] + fn parallel_logs_from_multiple_threads_are_properly_gathered() { + const THREAD_COUNT: usize = 128; + const LOGS_PER_THREAD: usize = 1024; + + let output = run_test_in_another_process( + "parallel_logs_from_multiple_threads_are_properly_gathered", + || { + let builder = LoggerBuilder::new(""); + builder.init().unwrap(); + + run_on_many_threads(THREAD_COUNT, |nth_thread| { + for _ in 0..LOGS_PER_THREAD { + info!("Thread <<{}>>", nth_thread); + } + }); + }, + ); + + if let Some(output) = output { + let stderr = String::from_utf8(output.stderr).unwrap(); + let mut count_per_thread = BTreeMap::new(); + for line in stderr.split("\n") { + if let Some(index_s) = line.find("Thread <<") { + let index_s = index_s + "Thread <<".len(); + let index_e = line.find(">>").unwrap(); + let nth_thread: usize = line[index_s..index_e].parse().unwrap(); + *count_per_thread.entry(nth_thread).or_insert(0) += 1; + } + } + + assert_eq!(count_per_thread.len(), THREAD_COUNT); + for (_, count) in count_per_thread { + assert_eq!(count, LOGS_PER_THREAD); + } + } + } + + #[test] + fn huge_single_line_log_is_properly_printed_out() { + let mut line = String::new(); + line.push_str("$$START$$"); + for n in 0..16 * 1024 * 1024 { + let ch = b'a' + (n as u8 % (b'z' - b'a')); + line.push(char::from(ch)); + } + line.push_str("$$END$$"); + + let output = + run_test_in_another_process("huge_single_line_log_is_properly_printed_out", || { + let builder = LoggerBuilder::new(""); + builder.init().unwrap(); + info!("{}", line); + }); + + if let Some(output) = output { + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!(stderr.contains(&line)); + } + } } diff --git a/client/tracing/src/logging/stderr_writer.rs b/client/tracing/src/logging/stderr_writer.rs new file mode 100644 index 0000000000000..9aab2491fb872 --- /dev/null +++ b/client/tracing/src/logging/stderr_writer.rs @@ -0,0 +1,228 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! This module contains a buffered semi-asynchronous stderr writer. +//! +//! Depending on how we were started writing to stderr can take a surprisingly long time. +//! +//! If the other side takes their sweet sweet time reading whatever we send them then writing +//! to stderr might block for a long time, since it is effectively a synchronous operation. +//! And every time we write to stderr we need to grab a global lock, which affects every thread +//! which also tries to log something at the same time. +//! +//! Of course we *will* be ultimately limited by how fast the recipient can ingest our logs, +//! but it's not like logging is the only thing we're doing. And we still can't entirely +//! avoid the problem of multiple threads contending for the same lock. (Well, technically +//! we could employ something like a lock-free circular buffer, but that might be like +//! killing a fly with a sledgehammer considering the complexity involved; this is only +//! a logger after all.) +//! +//! But we can try to make things a little better. We can offload actually writing to stderr +//! to another thread and flush the logs in bulk instead of doing it per-line, which should +//! reduce the amount of CPU time we waste on making syscalls and on spinning waiting for locks. +//! +//! How much this helps depends on a multitude of factors, including the hardware we're running on, +//! how much we're logging, from how many threads, which exact set of threads are logging, to what +//! stderr is actually connected to (is it a terminal emulator? a file? an UDP socket?), etc. +//! +//! In general this can reduce the real time execution time as much as 75% in certain cases, or it +//! can make absolutely no difference in others. + +use parking_lot::{Condvar, Mutex, Once}; +use std::{ + io::Write, + sync::atomic::{AtomicBool, Ordering}, + time::Duration, +}; +use tracing::{Level, Metadata}; + +/// How many bytes of buffered logs will trigger an async flush on another thread? +const ASYNC_FLUSH_THRESHOLD: usize = 16 * 1024; + +/// How many bytes of buffered logs will trigger a sync flush on the current thread? +const SYNC_FLUSH_THRESHOLD: usize = 768 * 1024; + +/// How many bytes can be buffered at maximum? +const EMERGENCY_FLUSH_THRESHOLD: usize = 2 * 1024 * 1024; + +/// If there isn't enough printed out this is how often the logs will be automatically flushed. +const AUTOFLUSH_EVERY: Duration = Duration::from_millis(50); + +/// The least serious level at which a synchronous flush will be triggered. +const SYNC_FLUSH_LEVEL_THRESHOLD: Level = Level::ERROR; + +/// The amount of time we'll block until the buffer is fully flushed on exit. +/// +/// This should be completely unnecessary in normal circumstances. +const ON_EXIT_FLUSH_TIMEOUT: Duration = Duration::from_secs(5); + +/// A global buffer to which we'll append all of our logs before flushing them out to stderr. +static BUFFER: Mutex> = parking_lot::const_mutex(Vec::new()); + +/// A spare buffer which we'll swap with the main buffer on each flush to minimize lock contention. +static SPARE_BUFFER: Mutex> = parking_lot::const_mutex(Vec::new()); + +/// A conditional variable used to forcefully trigger asynchronous flushes. 
+static ASYNC_FLUSH_CONDVAR: Condvar = Condvar::new(); + +static ENABLE_ASYNC_LOGGING: AtomicBool = AtomicBool::new(true); + +fn flush_logs(mut buffer: parking_lot::lock_api::MutexGuard>) { + let mut spare_buffer = SPARE_BUFFER.lock(); + std::mem::swap(&mut *spare_buffer, &mut *buffer); + std::mem::drop(buffer); + + let stderr = std::io::stderr(); + let mut stderr_lock = stderr.lock(); + let _ = stderr_lock.write_all(&*spare_buffer); + std::mem::drop(stderr_lock); + + spare_buffer.clear(); +} + +fn log_autoflush_thread() { + let mut buffer = BUFFER.lock(); + loop { + ASYNC_FLUSH_CONDVAR.wait_for(&mut buffer, AUTOFLUSH_EVERY); + loop { + flush_logs(buffer); + + buffer = BUFFER.lock(); + if buffer.len() >= ASYNC_FLUSH_THRESHOLD { + // While we were busy flushing we picked up enough logs to do another flush. + continue + } else { + break + } + } + } +} + +#[cold] +fn initialize() { + std::thread::Builder::new() + .name("log-autoflush".to_owned()) + .spawn(log_autoflush_thread) + .expect("thread spawning doesn't normally fail; qed"); + + // SAFETY: This is safe since we pass a valid pointer to `atexit`. + let errcode = unsafe { libc::atexit(on_exit) }; + assert_eq!(errcode, 0, "atexit failed while setting up the logger: {}", errcode); +} + +extern "C" fn on_exit() { + ENABLE_ASYNC_LOGGING.store(false, Ordering::SeqCst); + + if let Some(buffer) = BUFFER.try_lock_for(ON_EXIT_FLUSH_TIMEOUT) { + flush_logs(buffer); + } +} + +/// A drop-in replacement for [`std::io::stderr`] for use anywhere +/// a [`tracing_subscriber::fmt::MakeWriter`] is accepted. +pub struct MakeStderrWriter { + // A dummy field so that the structure is not publicly constructible. + _dummy: (), +} + +impl Default for MakeStderrWriter { + fn default() -> Self { + static ONCE: Once = Once::new(); + ONCE.call_once(initialize); + MakeStderrWriter { _dummy: () } + } +} + +impl tracing_subscriber::fmt::MakeWriter for MakeStderrWriter { + type Writer = StderrWriter; + + fn make_writer(&self) -> Self::Writer { + StderrWriter::new(false) + } + + // The `tracing-subscriber` crate calls this for every line logged. + fn make_writer_for(&self, meta: &Metadata<'_>) -> Self::Writer { + StderrWriter::new(*meta.level() <= SYNC_FLUSH_LEVEL_THRESHOLD) + } +} + +pub struct StderrWriter { + buffer: Option>>, + sync_flush_on_drop: bool, + original_len: usize, +} + +impl StderrWriter { + fn new(mut sync_flush_on_drop: bool) -> Self { + if !ENABLE_ASYNC_LOGGING.load(Ordering::Relaxed) { + sync_flush_on_drop = true; + } + + // This lock isn't as expensive as it might look, since this is only called once the full + // line to be logged is already serialized into a thread-local buffer inside of the + // `tracing-subscriber` crate, and basically the only thing we'll do when holding this lock + // is to copy that over to our global shared buffer in one go in `Write::write_all` and be + // immediately dropped. + let buffer = BUFFER.lock(); + StderrWriter { original_len: buffer.len(), buffer: Some(buffer), sync_flush_on_drop } + } +} + +#[cold] +fn emergency_flush(buffer: &mut Vec, input: &[u8]) { + let stderr = std::io::stderr(); + let mut stderr_lock = stderr.lock(); + let _ = stderr_lock.write_all(buffer); + buffer.clear(); + + let _ = stderr_lock.write_all(input); +} + +impl Write for StderrWriter { + fn write(&mut self, input: &[u8]) -> Result { + let buffer = self.buffer.as_mut().expect("buffer is only None after `drop`; qed"); + if buffer.len() + input.len() >= EMERGENCY_FLUSH_THRESHOLD { + // Make sure we don't blow our memory budget. 
Normally this should never happen, + // but there are cases where we directly print out untrusted user input which + // can potentially be megabytes in size. + emergency_flush(buffer, input); + } else { + buffer.extend_from_slice(input); + } + Ok(input.len()) + } + + fn write_all(&mut self, input: &[u8]) -> Result<(), std::io::Error> { + self.write(input).map(|_| ()) + } + + fn flush(&mut self) -> Result<(), std::io::Error> { + Ok(()) + } +} + +impl Drop for StderrWriter { + fn drop(&mut self) { + let buf = self.buffer.take().expect("buffer is only None after `drop`; qed"); + if self.sync_flush_on_drop || buf.len() >= SYNC_FLUSH_THRESHOLD { + flush_logs(buf); + } else if self.original_len < ASYNC_FLUSH_THRESHOLD && buf.len() >= ASYNC_FLUSH_THRESHOLD { + ASYNC_FLUSH_CONDVAR.notify_one(); + } + } +} diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 85eb22d6df072..46930a674f2c9 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -23,8 +23,8 @@ codec = { version = "2.0.0", package = "parity-scale-codec", default-features = "derive", ] } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.17", default-features = false } -tracing-subscriber = { version = "0.2.19", optional = true, features = [ +tracing-core = { version = "0.1.21", default-features = false } +tracing-subscriber = { version = "0.2.25", optional = true, features = [ "tracing-log", ] } From 6725823035c7926000e0a44055126977d868560b Mon Sep 17 00:00:00 2001 From: Grachev Mikhail Date: Fri, 22 Oct 2021 22:27:09 +0300 Subject: [PATCH 014/162] Remove broken links in Nicks Pallet (#10086) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix broken links in Nicks Pallet * Update frame/nicks/src/lib.rs * Update frame/nicks/src/lib.rs Co-authored-by: Bastian Köcher --- frame/nicks/src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 16c7e2042dda0..f502a683f633c 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -35,9 +35,6 @@ //! taken. //! * `clear_name` - Remove an account's associated name; the deposit is returned. //! * `kill_name` - Forcibly remove the associated name; the deposit is lost. -//! -//! [`Call`]: ./enum.Call.html -//! 
[`Config`]: ./trait.Config.html #![cfg_attr(not(feature = "std"), no_std)] From 969a70d1864fc5d5f6c378bcfd03f1b3ea434049 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Sat, 23 Oct 2021 00:54:12 +0300 Subject: [PATCH 015/162] Introduce `linux-docker-benches` (#10085) * Introduce `linux-docker-benches` * Add additional info to `node-bench-regression-guard` job's run --- .gitlab-ci.yml | 50 +++++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6ad2ff1a46d8f..87efc43af2898 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -363,30 +363,34 @@ cargo-check-benches: - *rust-info-script script: - *cargo-check-benches-script + tags: + - linux-docker-benches -# TODO: re-enable after dedicated bench hosts provisioning -# node-bench-regression-guard: -# # it's not belong to `build` semantically, but dag jobs can't depend on each other -# # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 -# # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 -# stage: build -# <<: *docker-env -# <<: *test-refs-no-trigger-prs-only -# needs: -# # this is a DAG -# - job: cargo-check-benches -# artifacts: true -# # this does not like a DAG, just polls the artifact -# - project: $CI_PROJECT_PATH -# job: cargo-check-benches -# ref: master -# artifacts: true -# variables: -# CI_IMAGE: "paritytech/node-bench-regression-guard:latest" -# before_script: [""] -# script: -# - 'node-bench-regression-guard --reference artifacts/benches/master-* -# --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' +node-bench-regression-guard: + # it's not belong to `build` semantically, but dag jobs can't depend on each other + # within the single stage - https://gitlab.com/gitlab-org/gitlab/-/issues/30632 + # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 + stage: build + <<: *docker-env + <<: *test-refs-no-trigger-prs-only + needs: + # this is a DAG + - job: cargo-check-benches + artifacts: true + # this does not like a DAG, just polls the artifact + - project: $CI_PROJECT_PATH + job: cargo-check-benches + ref: master + artifacts: true + variables: + CI_IMAGE: "paritytech/node-bench-regression-guard:latest" + before_script: [""] + script: + - echo "------- IMPORTANT -------" + - echo "node-bench-regression-guard depends on the results of a cargo-check-benches job" + - echo "In case of this job failure, check your pipeline's cargo-check-benches" + - 'node-bench-regression-guard --reference artifacts/benches/master-* + --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' cargo-check-subkey: stage: test From 456509d038c9535dfe6b4fc829ed1004af25a47b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 25 Oct 2021 13:29:38 +0200 Subject: [PATCH 016/162] pallet-multisig: Improve opaque call handling (#10060) * pallet-multisig: Improve opaque call handling Before the opaque call was just a type redefinition of `Vec`. With metadata v14 that was breaking external tools, as they stopped looking at the type name. To improve the situation the `WrapperKeepOpaque` type is introduced that communicates to the outside the correct type info. 
* Cleanup * Fix benchmarks * FMT --- frame/multisig/src/benchmarking.rs | 25 +++--- frame/multisig/src/lib.rs | 44 +++++----- frame/multisig/src/tests.rs | 129 ++++++++++++++++++++++------- frame/support/src/traits.rs | 3 +- frame/support/src/traits/misc.rs | 103 ++++++++++++++++++++++- 5 files changed, 241 insertions(+), 63 deletions(-) diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index edfeba253e5f0..1390b6eebbe34 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -29,7 +29,10 @@ use crate::Pallet as Multisig; const SEED: u32 = 0; -fn setup_multi(s: u32, z: u32) -> Result<(Vec, Vec), &'static str> { +fn setup_multi( + s: u32, + z: u32, +) -> Result<(Vec, OpaqueCall), &'static str> { let mut signatories: Vec = Vec::new(); for i in 0..s { let signatory = account("signatory", i, SEED); @@ -42,7 +45,7 @@ fn setup_multi(s: u32, z: u32) -> Result<(Vec, Vec) // Must first convert to outer call type. let call: ::Call = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); - let call_data = call.encode(); + let call_data = OpaqueCall::::from_encoded(call.encode()); return Ok((signatories, call_data)) } @@ -72,7 +75,7 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // Whitelist caller account from further DB operations. @@ -90,7 +93,7 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -109,7 +112,7 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -134,7 +137,7 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -160,7 +163,7 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; @@ -193,7 +196,7 @@ benchmarks! 
{ let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); @@ -212,7 +215,7 @@ benchmarks! { let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi @@ -245,7 +248,7 @@ benchmarks! { let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi @@ -282,7 +285,7 @@ benchmarks! { let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(&call); + let call_hash = blake2_256(&call.encoded()); let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 43040ada45a98..53567cc212afd 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -56,7 +56,7 @@ use frame_support::{ DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo, }, ensure, - traits::{Currency, Get, ReservableCurrency}, + traits::{Currency, Get, ReservableCurrency, WrapperKeepOpaque}, weights::{GetDispatchInfo, Weight}, RuntimeDebug, }; @@ -74,8 +74,6 @@ pub use pallet::*; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -/// Just a bunch of bytes, but they should decode to a valid `Call`. -pub type OpaqueCall = Vec; /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. 
This allows a transaction in which a multisig operation of a particular @@ -101,10 +99,12 @@ pub struct Multisig { approvals: Vec, } +type OpaqueCall = WrapperKeepOpaque<::Call>; + type CallHash = [u8; 32]; -enum CallOrHash { - Call(OpaqueCall, bool), +enum CallOrHash { + Call(OpaqueCall, bool), Hash([u8; 32]), } @@ -168,7 +168,7 @@ pub mod pallet { #[pallet::storage] pub type Calls = - StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; + StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; #[pallet::error] pub enum Error { @@ -339,7 +339,7 @@ pub mod pallet { /// # #[pallet::weight({ let s = other_signatories.len() as u32; - let z = call.len() as u32; + let z = call.encoded_len() as u32; T::WeightInfo::as_multi_create(s, z) .max(T::WeightInfo::as_multi_create_store(s, z)) @@ -352,7 +352,7 @@ pub mod pallet { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: OpaqueCall, + call: OpaqueCall, store_call: bool, max_weight: Weight, ) -> DispatchResultWithPostInfo { @@ -406,9 +406,9 @@ pub mod pallet { let s = other_signatories.len() as u32; T::WeightInfo::approve_as_multi_create(s) - .max(T::WeightInfo::approve_as_multi_approve(s)) - .max(T::WeightInfo::approve_as_multi_complete(s)) - .saturating_add(*max_weight) + .max(T::WeightInfo::approve_as_multi_approve(s)) + .max(T::WeightInfo::approve_as_multi_complete(s)) + .saturating_add(*max_weight) })] pub fn approve_as_multi( origin: OriginFor, @@ -502,7 +502,7 @@ impl Pallet { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call_or_hash: CallOrHash, + call_or_hash: CallOrHash, max_weight: Weight, ) -> DispatchResultWithPostInfo { ensure!(threshold >= 2, Error::::MinimumThreshold); @@ -517,8 +517,8 @@ impl Pallet { // Threshold > 1; this means it's a multi-step operation. We extract the `call_hash`. let (call_hash, call_len, maybe_call, store) = match call_or_hash { CallOrHash::Call(call, should_store) => { - let call_hash = blake2_256(&call); - let call_len = call.len(); + let call_hash = blake2_256(call.encoded()); + let call_len = call.encoded_len(); (call_hash, call_len, Some(call), should_store) }, CallOrHash::Hash(h) => (h, 0, None, false), @@ -541,7 +541,7 @@ impl Pallet { // We only bother fetching/decoding call if we know that we're ready to execute. let maybe_approved_call = if approvals >= threshold { - Self::get_call(&call_hash, maybe_call.as_ref().map(|c| c.as_ref())) + Self::get_call(&call_hash, maybe_call.as_ref()) } else { None }; @@ -658,13 +658,14 @@ impl Pallet { fn store_call_and_reserve( who: T::AccountId, hash: &[u8; 32], - data: OpaqueCall, + data: OpaqueCall, other_deposit: BalanceOf, ) -> DispatchResult { ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); let deposit = other_deposit + T::DepositBase::get() + - T::DepositFactor::get() * BalanceOf::::from(((data.len() + 31) / 32) as u32); + T::DepositFactor::get() * + BalanceOf::::from(((data.encoded_len() + 31) / 32) as u32); T::Currency::reserve(&who, deposit)?; Calls::::insert(&hash, (data, who, deposit)); Ok(()) @@ -673,15 +674,14 @@ impl Pallet { /// Attempt to decode and return the call, provided by the user or from storage. 
fn get_call( hash: &[u8; 32], - maybe_known: Option<&[u8]>, + maybe_known: Option<&OpaqueCall>, ) -> Option<(::Call, usize)> { maybe_known.map_or_else( || { - Calls::::get(hash).and_then(|(data, ..)| { - Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) - }) + Calls::::get(hash) + .and_then(|(data, ..)| Some((data.try_decode()?, data.encoded_len()))) }, - |data| Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())), + |data| Some((data.try_decode()?, data.encoded_len())), ) } diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 3d311cf5d3dc8..d46c22ec73d09 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -31,6 +31,7 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; +type OpaqueCall = super::OpaqueCall; frame_support::construct_runtime!( pub enum Test where @@ -152,7 +153,7 @@ fn multisig_deposit_is_taken_and_returned() { 2, vec![2, 3], None, - data.clone(), + OpaqueCall::from_encoded(data.clone()), false, 0 )); @@ -164,7 +165,7 @@ fn multisig_deposit_is_taken_and_returned() { 2, vec![1, 3], Some(now()), - data, + OpaqueCall::from_encoded(data), false, call_weight )); @@ -185,7 +186,15 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + OpaqueCall::from_encoded(data), + true, + 0 + )); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 5); @@ -231,7 +240,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { 3, vec![1, 3], Some(now()), - data, + OpaqueCall::from_encoded(data), true, 0 )); @@ -316,7 +325,15 @@ fn timepoint_checking_works() { assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + None, + OpaqueCall::from_encoded(call.clone()), + false, + 0 + ), Error::::NoTimepoint, ); let later = Timepoint { index: 1, ..now() }; @@ -326,7 +343,7 @@ fn timepoint_checking_works() { 2, vec![1, 3], Some(later), - call.clone(), + OpaqueCall::from_encoded(call), false, 0 ), @@ -347,7 +364,15 @@ fn multisig_2_of_3_works_with_call_storing() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + OpaqueCall::from_encoded(data), + true, + 0 + )); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::approve_as_multi( @@ -382,7 +407,7 @@ fn multisig_2_of_3_works() { 2, vec![1, 3], Some(now()), - data, + OpaqueCall::from_encoded(data), false, call_weight )); @@ -425,7 +450,7 @@ fn multisig_3_of_3_works() { 3, vec![1, 2], Some(now()), - data, + OpaqueCall::from_encoded(data), false, call_weight )); @@ -473,7 +498,15 @@ fn cancel_multisig_with_call_storage_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::as_multi(Origin::signed(1), 3, vec![2, 3], None, call, true, 0)); + 
assert_ok!(Multisig::as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + OpaqueCall::from_encoded(call), + true, + 0 + )); assert_eq!(Balances::free_balance(1), 4); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), @@ -517,7 +550,7 @@ fn cancel_multisig_with_alt_call_storage_works() { 3, vec![1, 3], Some(now()), - call, + OpaqueCall::from_encoded(call), true, 0 )); @@ -544,7 +577,7 @@ fn multisig_2_of_3_as_multi_works() { 2, vec![2, 3], None, - data.clone(), + OpaqueCall::from_encoded(data.clone()), false, 0 )); @@ -555,7 +588,7 @@ fn multisig_2_of_3_as_multi_works() { 2, vec![1, 3], Some(now()), - data, + OpaqueCall::from_encoded(data), false, call_weight )); @@ -583,7 +616,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![2, 3], None, - data1.clone(), + OpaqueCall::from_encoded(data1.clone()), false, 0 )); @@ -592,7 +625,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 3], None, - data2.clone(), + OpaqueCall::from_encoded(data2.clone()), false, 0 )); @@ -601,7 +634,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - data1, + OpaqueCall::from_encoded(data1), false, call1_weight )); @@ -610,7 +643,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - data2, + OpaqueCall::from_encoded(data2), false, call2_weight )); @@ -637,7 +670,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![2, 3], None, - data.clone(), + OpaqueCall::from_encoded(data.clone()), false, 0 )); @@ -646,7 +679,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 3], Some(now()), - data.clone(), + OpaqueCall::from_encoded(data.clone()), false, call_weight )); @@ -657,7 +690,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![2, 3], None, - data.clone(), + OpaqueCall::from_encoded(data.clone()), false, 0 )); @@ -666,7 +699,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 2], Some(now()), - data.clone(), + OpaqueCall::from_encoded(data), false, call_weight )); @@ -683,11 +716,27 @@ fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); assert_noop!( - Multisig::as_multi(Origin::signed(1), 0, vec![2], None, call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(1), + 0, + vec![2], + None, + OpaqueCall::from_encoded(call.clone()), + false, + 0 + ), Error::::MinimumThreshold, ); assert_noop!( - Multisig::as_multi(Origin::signed(1), 1, vec![2], None, call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(1), + 1, + vec![2], + None, + OpaqueCall::from_encoded(call.clone()), + false, + 0 + ), Error::::MinimumThreshold, ); }); @@ -698,7 +747,15 @@ fn too_many_signatories_fails() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); assert_noop!( - Multisig::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3, 4], + None, + OpaqueCall::from_encoded(call), + false, + 0 + ), Error::::TooManySignatories, ); }); @@ -765,7 +822,15 @@ fn multisig_1_of_3_works() { Error::::MinimumThreshold, ); assert_noop!( - Multisig::as_multi(Origin::signed(1), 1, vec![2, 3], None, call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(1), + 1, + vec![2, 3], + None, + OpaqueCall::from_encoded(call), + false, + 0 + ), Error::::MinimumThreshold, ); let boxed_call = Box::new(call_transfer(6, 15)); @@ -801,14 +866,22 @@ fn weight_check_works() { 2, vec![2, 3], None, - data.clone(), + 
OpaqueCall::from_encoded(data.clone()), false, 0 )); assert_eq!(Balances::free_balance(6), 0); assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, 0), + Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + OpaqueCall::from_encoded(data), + false, + 0 + ), Error::::MaxWeightTooLow, ); }); @@ -860,7 +933,7 @@ fn multisig_handles_no_preimage_after_all_approve() { 3, vec![1, 2], Some(now()), - data, + OpaqueCall::from_encoded(data), false, call_weight )); diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 513267c5c8ba6..43eadb3a05073 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -52,7 +52,8 @@ mod misc; pub use misc::{ Backing, ConstU32, EnsureInherentsAreFirst, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, - OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, WrapperOpaque, + OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, WrapperKeepOpaque, + WrapperOpaque, }; mod stored_map; diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 9109bfeeae722..6587945604d0b 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,7 +18,7 @@ //! Smaller traits used in FRAME which don't need their own file. use crate::dispatch::Parameter; -use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen}; +use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_runtime::{traits::Block as BlockT, DispatchError}; use sp_std::prelude::*; @@ -390,6 +390,7 @@ impl, const T: u32> EstimateCallFee for pub struct WrapperOpaque(pub T); impl EncodeLike for WrapperOpaque {} +impl EncodeLike> for WrapperOpaque {} impl Encode for WrapperOpaque { fn size_hint(&self) -> usize { @@ -456,6 +457,93 @@ impl TypeInfo for WrapperOpaque { } } +/// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec`. +/// +/// This type is similar to [`WrapperOpaque`], but it differs in the way it stores the type `T`. +/// While [`WrapperOpaque`] stores the decoded type, the [`WrapperKeepOpaque`] stores the type only +/// in its opaque format, aka as a `Vec`. To access the real type `T` [`Self::try_decode`] needs +/// to be used. +#[derive(Debug, Eq, PartialEq, Default, Clone)] +pub struct WrapperKeepOpaque { + data: Vec, + _phantom: sp_std::marker::PhantomData, +} + +impl WrapperKeepOpaque { + /// Try to decode the wrapped type from the inner `data`. + /// + /// Returns `None` if the decoding failed. + pub fn try_decode(&self) -> Option { + T::decode_all(&mut &self.data[..]).ok() + } + + /// Returns the length of the encoded `T`. + pub fn encoded_len(&self) -> usize { + self.data.len() + } + + /// Returns the encoded data. + pub fn encoded(&self) -> &[u8] { + &self.data + } + + /// Create from the given encoded `data`. 
+ pub fn from_encoded(data: Vec) -> Self { + Self { data, _phantom: sp_std::marker::PhantomData } + } +} + +impl EncodeLike for WrapperKeepOpaque {} +impl EncodeLike> for WrapperKeepOpaque {} + +impl Encode for WrapperKeepOpaque { + fn size_hint(&self) -> usize { + self.data.len() + codec::Compact::::compact_len(&(self.data.len() as u32)) + } + + fn encode_to(&self, dest: &mut O) { + self.data.encode_to(dest); + } + + fn encode(&self) -> Vec { + self.data.encode() + } + + fn using_encoded R>(&self, f: F) -> R { + self.data.using_encoded(f) + } +} + +impl Decode for WrapperKeepOpaque { + fn decode(input: &mut I) -> Result { + Ok(Self { data: Vec::::decode(input)?, _phantom: sp_std::marker::PhantomData }) + } + + fn skip(input: &mut I) -> Result<(), codec::Error> { + >::skip(input) + } +} + +impl MaxEncodedLen for WrapperKeepOpaque { + fn max_encoded_len() -> usize { + WrapperOpaque::::max_encoded_len() + } +} + +impl TypeInfo for WrapperKeepOpaque { + type Identity = Self; + fn type_info() -> Type { + Type::builder() + .path(Path::new("WrapperKeepOpaque", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) + .composite( + Fields::unnamed() + .field(|f| f.compact::()) + .field(|f| f.ty::().type_name("T")), + ) + } +} + #[cfg(test)] mod test { use super::*; @@ -488,4 +576,17 @@ mod test { ); assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); } + + #[test] + fn test_keep_opaque_wrapper() { + let data = 3u32.encode().encode(); + + let keep_opaque = WrapperKeepOpaque::::decode(&mut &data[..]).unwrap(); + keep_opaque.try_decode().unwrap(); + + let data = WrapperOpaque(50u32).encode(); + let decoded = WrapperKeepOpaque::::decode(&mut &data[..]).unwrap(); + let data = decoded.encode(); + WrapperOpaque::::decode(&mut &data[..]).unwrap(); + } } From 732f0371c2d4da7b0ec326c681e0c1d360fc1bd8 Mon Sep 17 00:00:00 2001 From: Luke Schoen Date: Mon, 25 Oct 2021 23:49:43 +1100 Subject: [PATCH 017/162] Offchain-worker: Update example-offchain-worker with implementation for TestAuthId (#10096) * Update example-offchain-worker to include missing implementation for TestAuthId i tried to incorporate the off-chain worker callback demo as a custom pallet of my own Substrate-based blockchain implementation that's provided at the following links * https://www.parity.io/blog/substrate-off-chain-workers-secure-and-efficient-computing-intensive-tasks/ * https://gnunicorn.github.io/substrate-offchain-cb/ but when i build the code with `cargo build --release`, it gave me an error: ``` error[E0277]: the trait bound `AuthorityId: AppCrypto` is not satisfied --> /Users/me/my_repo/node/runtime/src/lib.rs:1172:5 | 1172 | type AuthorityId = AuthorityId; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AppCrypto` is not implemented for `AuthorityId` | note: required by a bound in `offchaincb::Config::AuthorityId` --> /Users/me/my_repo/node/pallets/offchaincb/src/lib.rs:169:21 | 169 | type AuthorityId: AppCrypto; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `offchaincb::Config::AuthorityId` ``` where in my custom pallet i have: pallets/offchaincb/src/lib.rs ``` ... use offchaincb::{ crypto::{ TestAuthId, }, }; ... parameter_types! 
{ pub const GracePeriod: BlockNumber = 1 * MINUTES; pub const UnsignedInterval: BlockNumber = 1 * MINUTES; pub const UnsignedPriority: BlockNumber = 1 * MINUTES; } impl offchaincb::Config for Runtime { type AuthorityId = TestAuthId; type Call = Call; type Currency = Balances; type Event = Event; type GracePeriod = GracePeriod; type UnsignedInterval = UnsignedInterval; type UnsignedPriority = UnsignedPriority; } ... ``` then i found another different off-chain workers Substrate Recipe demo from Jimmy Chu https://github.com/jimmychu0807/recipes/blob/master/pallets/ocw-demo/src/lib.rs#L73 which had an extra implementation for TestAuthId here https://github.com/jimmychu0807/recipes/blob/master/pallets/ocw-demo/src/lib.rs#L73, and when i added that it overcame the error. so i think this change should be included in the Substrate repository * Fix indentation * Fix formatting * Swap order --- frame/example-offchain-worker/src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index 644e1ca299a3c..9b63ffa663ee2 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -86,10 +86,19 @@ pub mod crypto { use sp_runtime::{ app_crypto::{app_crypto, sr25519}, traits::Verify, + MultiSignature, MultiSigner, }; app_crypto!(sr25519, KEY_TYPE); pub struct TestAuthId; + + impl frame_system::offchain::AppCrypto for TestAuthId { + type RuntimeAppPublic = Public; + type GenericSignature = sp_core::sr25519::Signature; + type GenericPublic = sp_core::sr25519::Public; + } + + // implemented for mock runtime in test impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> for TestAuthId { From f37f1c84fd22e3ae91f19e09367d4f44f47a65d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 26 Oct 2021 19:10:45 +0200 Subject: [PATCH 018/162] Remove `ss58-registry.json` (#10094) This file shouldn't exist anymore, maybe it was added accidentally by some pr. People should move over to https://github.com/paritytech/ss58-registry now. --- ss58-registry.json | 671 --------------------------------------------- 1 file changed, 671 deletions(-) delete mode 100644 ss58-registry.json diff --git a/ss58-registry.json b/ss58-registry.json deleted file mode 100644 index fdae23a5a6f21..0000000000000 --- a/ss58-registry.json +++ /dev/null @@ -1,671 +0,0 @@ -{ - "specification": "https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)", - "schema": { - "prefix": "The address prefix. Must be an integer and unique.", - "network": "Unique identifier for the network that will use this prefix, string, no spaces. To integrate with CLI tools, e.g. `--network polkadot`.", - "displayName": "The name of the network that will use this prefix, in a format friendly for display.", - "symbols": "Array of symbols of any tokens the chain uses, usually 2-5 characters. Most chains will only have one. Chains that have multiple instances of the Balances pallet should order the array by instance.", - "decimals": "Array of integers representing the number of decimals that represent a single unit to the end user. Must be same length as `symbols` to represent each token's denomination.", - "standardAccount": "Signing curve for standard account. Substrate supports ed25519, sr25519, and secp256k1.", - "website": "A website or Github repo associated with the network." 
- }, - "registry": [ - { - "prefix": 0, - "network": "polkadot", - "displayName": "Polkadot Relay Chain", - "symbols": ["DOT"], - "decimals": [10], - "standardAccount": "*25519", - "website": "https://polkadot.network" - }, - { - "prefix": 1, - "network": null, - "displayName": "Bare 32-bit Schnorr/Ristretto (S/R 25519) public key.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null - }, - { - "prefix": 2, - "network": "kusama", - "displayName": "Kusama Relay Chain", - "symbols": ["KSM"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://kusama.network" - }, - { - "prefix": 3, - "network": null, - "displayName": "Bare 32-bit Ed25519 public key.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null - }, - { - "prefix": 4, - "network": "katalchain", - "displayName": "Katal Chain", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": null - }, - { - "prefix": 5, - "network": "plasm", - "displayName": "Plasm Network", - "symbols": ["PLM"], - "decimals": [15], - "standardAccount": "*25519", - "website": "https://plasmnet.io" - }, - { - "prefix": 6, - "network": "bifrost", - "displayName": "Bifrost", - "symbols": ["BNC"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://bifrost.finance/" - }, - { - "prefix": 7, - "network": "edgeware", - "displayName": "Edgeware", - "symbols": ["EDG"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://edgewa.re" - }, - { - "prefix": 8, - "network": "karura", - "displayName": "Karura", - "symbols": ["KAR"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://karura.network/" - }, - { - "prefix": 9, - "network": "reynolds", - "displayName": "Laminar Reynolds Canary", - "symbols": ["REY"], - "decimals": [18], - "standardAccount": "*25519", - "website": "http://laminar.network/" - }, - { - "prefix": 10, - "network": "acala", - "displayName": "Acala", - "symbols": ["ACA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://acala.network/" - }, - { - "prefix": 11, - "network": "laminar", - "displayName": "Laminar", - "symbols": ["LAMI"], - "decimals": [18], - "standardAccount": "*25519", - "website": "http://laminar.network/" - }, - { - "prefix": 12, - "network": "polymesh", - "displayName": "Polymesh", - "symbols": ["POLYX"], - "decimals": [6], - "standardAccount": "*25519", - "website": "https://polymath.network/" - }, - { - "prefix": 13, - "network": "integritee", - "displayName": "Integritee", - "symbols": ["TEER"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://integritee.network" - }, - { - "prefix": 14, - "network": "totem", - "displayName": "Totem", - "symbols": ["XTX"], - "decimals": [0], - "standardAccount": "*25519", - "website": "https://totemaccounting.com" - }, - { - "prefix": 15, - "network": "synesthesia", - "displayName": "Synesthesia", - "symbols": ["SYN"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://synesthesia.network/" - }, - { - "prefix": 16, - "network": "kulupu", - "displayName": "Kulupu", - "symbols": ["KLP"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://kulupu.network/" - }, - { - "prefix": 17, - "network": "dark", - "displayName": "Dark Mainnet", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": null - }, - { - "prefix": 18, - "network": "darwinia", - "displayName": "Darwinia Network", - "symbols": ["RING", "KTON"], - 
"decimals": [9, 9], - "standardAccount": "*25519", - "website": "https://darwinia.network/" - }, - { - "prefix": 19, - "network": "geek", - "displayName": "GeekCash", - "symbols": ["GEEK"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://geekcash.org" - }, - { - "prefix": 20, - "network": "stafi", - "displayName": "Stafi", - "symbols": ["FIS"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://stafi.io" - }, - { - "prefix": 21, - "network": "dock-testnet", - "displayName": "Dock Testnet", - "symbols": ["DCK"], - "decimals": [6], - "standardAccount": "*25519", - "website": "https://dock.io" - }, - { - "prefix": 22, - "network": "dock-mainnet", - "displayName": "Dock Mainnet", - "symbols": ["DCK"], - "decimals": [6], - "standardAccount": "*25519", - "website": "https://dock.io" - }, - { - "prefix": 23, - "network": "shift", - "displayName": "ShiftNrg", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": null - }, - { - "prefix": 24, - "network": "zero", - "displayName": "ZERO", - "symbols": ["PLAY"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://zero.io" - }, - { - "prefix": 25, - "network": "zero-alphaville", - "displayName": "ZERO Alphaville", - "symbols": ["PLAY"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://zero.io" - }, - { - "prefix": 26, - "network": "jupiter", - "displayName": "Jupiter", - "symbols": ["jDOT"], - "decimals": [10], - "standardAccount": "*25519", - "website": "https://jupiter.patract.io" - }, - { - "prefix": 28, - "network": "subsocial", - "displayName": "Subsocial", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": null - }, - { - "prefix": 29, - "network": "cord", - "displayName": "Dhiway CORD Network", - "symbols": ["DCU"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://dhiway.com/" - }, - { - "prefix": 30, - "network": "phala", - "displayName": "Phala Network", - "symbols": ["PHA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://phala.network" - }, - { - "prefix": 31, - "network": "litentry", - "displayName": "Litentry Network", - "symbols": ["LIT"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://litentry.com/" - }, - { - "prefix": 32, - "network": "robonomics", - "displayName": "Robonomics", - "symbols": ["XRT"], - "decimals": [9], - "standardAccount": "*25519", - "website": "https://robonomics.network" - }, - { - "prefix": 33, - "network": "datahighway", - "displayName": "DataHighway", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": null - }, - { - "prefix": 34, - "network": "ares", - "displayName": "Ares Protocol", - "symbols": ["ARES"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://www.aresprotocol.com/" - }, - { - "prefix": 35, - "network": "vln", - "displayName": "Valiu Liquidity Network", - "symbols": ["USDv"], - "decimals": [15], - "standardAccount": "*25519", - "website": "https://valiu.com/" - }, - { - "prefix": 36, - "network": "centrifuge", - "displayName": "Centrifuge Chain", - "symbols": ["CFG"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://centrifuge.io/" - }, - { - "prefix": 37, - "network": "nodle", - "displayName": "Nodle Chain", - "symbols": ["NODL"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://nodle.io/" - }, - { - "prefix": 38, - "network": "kilt", - "displayName": "KILT Chain", - 
"symbols": ["KILT"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://kilt.io/" - }, - { - "prefix": 39, - "network": "mathchain", - "displayName": "MathChain mainnet", - "symbols": ["MATH"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://mathwallet.org" - }, - { - "prefix": 40, - "network": "mathchain-testnet", - "displayName": "MathChain testnet", - "symbols": ["MATH"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://mathwallet.org" - }, - { - "prefix": 41, - "network": "poli", - "displayName": "Polimec Chain", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": "https://polimec.io/" - }, - { - "prefix": 42, - "network": "substrate", - "displayName": "Substrate", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": "https://docs.substrate.io/" - }, - { - "prefix": 43, - "network": null, - "displayName": "Bare 32-bit ECDSA SECP-256k1 public key.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null - }, - { - "prefix": 44, - "network": "chainx", - "displayName": "ChainX", - "symbols": ["PCX"], - "decimals": [8], - "standardAccount": "*25519", - "website": "https://chainx.org/" - }, - { - "prefix": 45, - "network": "uniarts", - "displayName": "UniArts Network", - "symbols": ["UART", "UINK"], - "decimals": [12, 12], - "standardAccount": "*25519", - "website": "https://uniarts.me" - }, - { - "prefix": 46, - "network": "reserved46", - "displayName": "This prefix is reserved.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null - }, - { - "prefix": 47, - "network": "reserved47", - "displayName": "This prefix is reserved.", - "symbols": null, - "decimals": null, - "standardAccount": null, - "website": null - }, - { - "prefix": 48, - "network": "neatcoin", - "displayName": "Neatcoin Mainnet", - "symbols": ["NEAT"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://neatcoin.org" - }, - { - "prefix": 49, - "network": "picasso", - "displayName": "Picasso", - "symbols": ["PICA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://picasso.composable.finance" - }, - { - "prefix": 50, - "network": "composable", - "displayName": "Composable", - "symbols": ["LAYR"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://composable.finance" - }, - { - "prefix": 63, - "network": "hydradx", - "displayName": "HydraDX", - "symbols": ["HDX"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://hydradx.io" - }, - { - "prefix": 65, - "network": "aventus", - "displayName": "AvN Mainnet", - "symbols": ["AVT"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://aventus.io" - }, - { - "prefix": 66, - "network": "crust", - "displayName": "Crust Network", - "symbols": ["CRU"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://crust.network" - }, - { - "prefix": 67, - "network": "equilibrium", - "displayName": "Equilibrium Network", - "symbols": ["Unknown", "USD", "EQ", "ETH", "BTC", "EOS", "DOT", "CRV"], - "decimals": [0,9,9,9,9,9,9,9], - "standardAccount": "*25519", - "website": "https://equilibrium.io" - }, - { - "prefix": 69, - "network": "sora", - "displayName": "SORA Network", - "symbols": ["XOR"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://sora.org" - }, - { - "prefix": 73, - "network": "zeitgeist", - "displayName": "Zeitgeist", - "symbols": ["ZTG"], - 
"decimals": [10], - "standardAccount": "*25519", - "website": "https://zeitgeist.pm" - }, - { - "prefix": 77, - "network": "manta", - "displayName": "Manta network", - "symbols": ["MA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://manta.network" - }, - { - "prefix": 78, - "network": "calamari", - "displayName": "Calamari: Manta Canary Network", - "symbols": ["KMA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://manta.network" - }, - { - "prefix": 88, - "network": "polkadex", - "displayName": "Polkadex Mainnet", - "symbols": ["PDEX"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://polkadex.trade" - }, - { - "prefix": 98, - "network": "polkasmith", - "displayName": "PolkaSmith Canary Network", - "symbols": ["PKS"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://polkafoundry.com" - }, - { - "prefix": 99, - "network": "polkafoundry", - "displayName": "PolkaFoundry Network", - "symbols": ["PKF"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://polkafoundry.com" - }, - { - "prefix": 101, - "network": "origintrail-parachain", - "displayName": "OriginTrail Parachain", - "symbols": ["TRAC"], - "decimals": [18], - "standardAccount": "secp256k1", - "website": "https://origintrail.io" - }, - { - "prefix": 110, - "network": "heiko", - "displayName": "Heiko", - "symbols": ["HKO"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://parallel.fi/" - }, - { - "prefix": 113, - "network": "integritee-incognito", - "displayName": "Integritee Incognito", - "symbols": null, - "decimals": null, - "standardAccount": "*25519", - "website": "https://integritee.network" - }, - { - "prefix": 128, - "network": "clover", - "displayName": "Clover Finance", - "symbols": ["CLV"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://clover.finance" - }, - { - "prefix": 136, - "network": "altair", - "displayName": "Altair", - "symbols": ["AIR"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://centrifuge.io/" - }, - { - "prefix": 172, - "network": "parallel", - "displayName": "Parallel", - "symbols": ["PARA"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://parallel.fi/" - }, - { - "prefix": 252, - "network": "social-network", - "displayName": "Social Network", - "symbols": ["NET"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://social.network" - }, - { - "prefix": 1284, - "network": "moonbeam", - "displayName": "Moonbeam", - "symbols": ["GLMR"], - "decimals": [18], - "standardAccount": "secp256k1", - "website": "https://moonbeam.network" - }, - { - "prefix": 1285, - "network": "moonriver", - "displayName": "Moonriver", - "symbols": ["MOVR"], - "decimals": [18], - "standardAccount": "secp256k1", - "website": "https://moonbeam.network" - }, - { - "prefix": 2349, - "network": "automata", - "displayName": "Automata Mainnet", - "symbols": ["ATA"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://ata.network" - }, - { - "prefix": 10041, - "network": "basilisk", - "displayName": "Basilisk", - "symbols": ["BSX"], - "decimals": [12], - "standardAccount": "*25519", - "website": "https://bsx.fi" - }, - { - "prefix": 11820, - "network": "contextfree", - "displayName": "Automata ContextFree", - "symbols": ["CTX"], - "decimals": [18], - "standardAccount": "*25519", - "website": "https://ata.network" - } - ] -} From c5450ed3d85b150ddc7fb40a68d683887dac43c4 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Oct 2021 10:18:33 +0000 Subject: [PATCH 019/162] Bump libc from 0.2.103 to 0.2.105 (#10115) Bumps [libc](https://github.com/rust-lang/libc) from 0.2.103 to 0.2.105. - [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.103...0.2.105) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/executor/wasmtime/Cargo.toml | 2 +- client/tracing/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6325304bfceec..ed10002bd5489 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3266,9 +3266,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.103" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" +checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013" [[package]] name = "libgit2-sys" diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 9fb76ed08fd9a..7a877bd9578f5 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -libc = "0.2.90" +libc = "0.2.105" cfg-if = "1.0" log = "0.4.8" parity-wasm = "0.42.0" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 4939e6a73110c..8093420dc5b95 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -17,7 +17,7 @@ ansi_term = "0.12.1" atty = "0.2.13" chrono = "0.4.19" lazy_static = "1.4.0" -libc = "0.2.95" +libc = "0.2.105" log = { version = "0.4.8" } once_cell = "1.8.0" parking_lot = "0.11.1" From 69fa67d0a7647f3b33f679494684859e56161197 Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Fri, 29 Oct 2021 20:42:51 +0800 Subject: [PATCH 020/162] Remove useless WeightInfo in pallet-offences (#10114) --- frame/offences/src/lib.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 3392cd6e4a884..d50bc55f88357 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -43,28 +43,6 @@ type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. 
type ReportIdOf<T> = <T as frame_system::Config>::Hash; -pub trait WeightInfo { - fn report_offence_im_online(r: u32, o: u32, n: u32) -> Weight; - fn report_offence_grandpa(r: u32, n: u32) -> Weight; - fn report_offence_babe(r: u32, n: u32) -> Weight; - fn on_initialize(d: u32) -> Weight; -} - -impl WeightInfo for () { - fn report_offence_im_online(_r: u32, _o: u32, _n: u32) -> Weight { - 1_000_000_000 - } - fn report_offence_grandpa(_r: u32, _n: u32) -> Weight { - 1_000_000_000 - } - fn report_offence_babe(_r: u32, _n: u32) -> Weight { - 1_000_000_000 - } - fn on_initialize(_d: u32) -> Weight { - 1_000_000_000 - } -} - #[frame_support::pallet] pub mod pallet { use super::*; From 945377f35fd6ba265d6bd2b808e97749471dec0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 29 Oct 2021 18:00:49 +0200 Subject: [PATCH 021/162] pallet-scheduler: Introduce `OriginPrivilegeCmp` (#10078) * pallet-scheduler: Introduce `OriginPrivilegeCmp` When a scheduled task is to be canceled, the origin that tries to cancel the task is compared to the origin the task would be executed with. Before this PR, the check only allowed both origins to be equal. This is problematic because it means that, for example, a council origin needs exactly the same number of yes votes to cancel a scheduled task, while a council origin with more yes votes should also be able to cancel it. This recently happened on Kusama and led to a failed cancellation of a scheduled task. With this PR the two origins are compared, and the cancelling origin must have privileges greater than or equal to those of the origin that scheduled the task. What counts as a greater, equal or lesser privilege can be configured in the runtime. For simplicity, an `EqualPrivilegeOnly` implementation is provided that only checks whether two origins are equal, which mimics the old behaviour. * FMT * fix import * Small optimizations Co-authored-by: Shawn Tabrizi --- bin/node/runtime/src/lib.rs | 5 +++-- frame/democracy/src/tests.rs | 3 ++- frame/scheduler/src/lib.rs | 26 +++++++++++++++++++++----- frame/support/src/traits.rs | 8 ++++---- frame/support/src/traits/misc.rs | 22 +++++++++++++++++++++- 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c7920629bf356..0638e62faa362 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -26,8 +26,8 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ construct_runtime, parameter_types, traits::{ - Currency, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, - Nothing, OnUnbalanced, U128CurrencyToVote, + Currency, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, @@ -345,6 +345,7 @@ impl pallet_scheduler::Config for Runtime { type ScheduleOrigin = EnsureRoot<AccountId>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = pallet_scheduler::weights::SubstrateWeight<Runtime>; + type OriginPrivilegeCmp = EqualPrivilegeOnly; } parameter_types!
{ diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index f56667e9094b3..06c4ac666cfba 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -22,7 +22,7 @@ use crate as pallet_democracy; use codec::Encode; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, - traits::{Contains, GenesisBuild, OnInitialize, SortedMembers}, + traits::{Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, SortedMembers}, weights::Weight, }; use frame_system::{EnsureRoot, EnsureSignedBy}; @@ -118,6 +118,7 @@ impl pallet_scheduler::Config for Test { type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = (); type WeightInfo = (); + type OriginPrivilegeCmp = EqualPrivilegeOnly; } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index ca9e15812a76d..d59c42cc850dd 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -58,7 +58,7 @@ use frame_support::{ dispatch::{DispatchError, DispatchResult, Dispatchable, Parameter}, traits::{ schedule::{self, DispatchTime}, - EnsureOrigin, Get, IsType, OriginTrait, + EnsureOrigin, Get, IsType, OriginTrait, PrivilegeCmp, }, weights::{GetDispatchInfo, Weight}, }; @@ -69,7 +69,7 @@ use sp_runtime::{ traits::{BadOrigin, One, Saturating, Zero}, RuntimeDebug, }; -use sp_std::{borrow::Borrow, marker::PhantomData, prelude::*}; +use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; pub use weights::WeightInfo; /// Just a simple index for naming period tasks. @@ -160,6 +160,15 @@ pub mod pallet { /// Required origin to schedule or cancel calls. type ScheduleOrigin: EnsureOrigin<::Origin>; + /// Compare the privileges of origins. + /// + /// This will be used when canceling a task, to ensure that the origin that tries + /// to cancel has greater or equal privileges as the origin that created the scheduled task. + /// + /// For simplicity the [`EqualPrivilegeOnly`](frame_support::traits::EqualPrivilegeOnly) can + /// be used. This will only check if two given origins are equal. + type OriginPrivilegeCmp: PrivilegeCmp; + /// The maximum number of scheduled calls in the queue for a single block. /// Not strictly enforced, but used for weight estimation. 
#[pallet::constant] @@ -614,7 +623,10 @@ impl Pallet { Ok(None), |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { - if *o != s.origin { + if matches!( + T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), + Some(Ordering::Less) | None + ) { return Err(BadOrigin.into()) } }; @@ -709,7 +721,10 @@ impl Pallet { Agenda::::try_mutate(when, |agenda| -> DispatchResult { if let Some(s) = agenda.get_mut(i) { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { - if *o != s.origin { + if matches!( + T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), + Some(Ordering::Less) | None + ) { return Err(BadOrigin.into()) } } @@ -832,7 +847,7 @@ mod tests { use crate as scheduler; use frame_support::{ assert_err, assert_noop, assert_ok, ord_parameter_types, parameter_types, - traits::{Contains, OnFinalize, OnInitialize}, + traits::{Contains, EqualPrivilegeOnly, OnFinalize, OnInitialize}, weights::constants::RocksDbWeight, Hashable, }; @@ -980,6 +995,7 @@ mod tests { type ScheduleOrigin = EnsureOneOf, EnsureSignedBy>; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = (); + type OriginPrivilegeCmp = EqualPrivilegeOnly; } pub type LoggerCall = logger::Call; diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 43eadb3a05073..bb990e25646db 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -50,10 +50,10 @@ pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter mod misc; pub use misc::{ - Backing, ConstU32, EnsureInherentsAreFirst, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, - GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, - OnKilledAccount, OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, WrapperKeepOpaque, - WrapperOpaque, + Backing, ConstU32, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, + ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, + OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, TryDrop, + UnixTime, WrapperKeepOpaque, WrapperOpaque, }; mod stored_map; diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 6587945604d0b..0a3fb045d6c1d 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -21,7 +21,7 @@ use crate::dispatch::Parameter; use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_runtime::{traits::Block as BlockT, DispatchError}; -use sp_std::prelude::*; +use sp_std::{cmp::Ordering, prelude::*}; /// Anything that can have a `::len()` method. pub trait Len { @@ -289,6 +289,26 @@ pub trait ExecuteBlock { fn execute_block(block: Block); } +/// Something that can compare privileges of two origins. +pub trait PrivilegeCmp { + /// Compare the `left` to the `right` origin. + /// + /// The returned ordering should be from the pov of the `left` origin. + /// + /// Should return `None` when it can not compare the given origins. + fn cmp_privilege(left: &Origin, right: &Origin) -> Option; +} + +/// Implementation of [`PrivilegeCmp`] that only checks for equal origins. +/// +/// This means it will either return [`Origin::Equal`] or `None`. 
+pub struct EqualPrivilegeOnly; +impl<Origin: PartialEq> PrivilegeCmp<Origin> for EqualPrivilegeOnly { + fn cmp_privilege(left: &Origin, right: &Origin) -> Option<Ordering> { + (left == right).then(|| Ordering::Equal) + } +} + /// Off-chain computation trait. /// /// Implementing this trait on a module allows you to perform long-running tasks From a7cc69f386eaf567fbf667b8c930da87cac1ce7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sat, 30 Oct 2021 11:13:18 +0200 Subject: [PATCH 022/162] contracts: Fix account counter isn't persisted (#10112) * Add test to check account counter persistence * Fix bug that the account counter wasn't properly persisted * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot --- frame/contracts/src/exec.rs | 107 +++- frame/contracts/src/weights.rs | 1090 ++++++++++++++++---------------- 2 files changed, 643 insertions(+), 554 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 7fa0b0b274449..c28490dfacccf 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -538,14 +538,15 @@ where value: BalanceOf<T>, debug_message: Option<&'a mut Vec<u8>>, ) -> Result<(Self, E), ExecError> { - let (first_frame, executable) = Self::new_frame(args, value, gas_meter, 0, &schedule)?; + let (first_frame, executable, account_counter) = + Self::new_frame(args, value, gas_meter, 0, &schedule)?; let stack = Self { origin, schedule, gas_meter, timestamp: T::Time::now(), block_number: <frame_system::Pallet<T>>::block_number(), - account_counter: None, + account_counter, first_frame, frames: Default::default(), debug_message, @@ -565,8 +566,9 @@ where gas_meter: &mut GasMeter<T>, gas_limit: Weight, schedule: &Schedule<T>, - ) -> Result<(Frame<T>, E), ExecError> { - let (account_id, contract_info, executable, entry_point) = match frame_args { + ) -> Result<(Frame<T>, E, Option<u64>), ExecError> { + let (account_id, contract_info, executable, entry_point, account_counter) = match frame_args + { FrameArgs::Call { dest, cached_info } => { let contract = if let Some(contract) = cached_info { contract @@ -576,7 +578,7 @@ where let executable = E::from_storage(contract.code_hash, schedule, gas_meter)?; - (dest, contract, executable, ExportedFunction::Call) + (dest, contract, executable, ExportedFunction::Call, None) }, FrameArgs::Instantiate { sender, trie_seed, executable, salt } => { let account_id = @@ -587,7 +589,7 @@ where trie_id, executable.code_hash().clone(), )?; - (account_id, contract, executable, ExportedFunction::Constructor) + (account_id, contract, executable, ExportedFunction::Constructor, Some(trie_seed)) }, }; @@ -600,7 +602,7 @@ where allows_reentry: true, }; - Ok((frame, executable)) + Ok((frame, executable, account_counter)) } /// Create a subsequent nested frame. @@ -629,7 +631,7 @@ where let nested_meter = &mut self.frames.last_mut().unwrap_or(&mut self.first_frame).nested_meter; - let (frame, executable) = + let (frame, executable, _) = Self::new_frame(frame_args, value_transferred, nested_meter, gas_limit, self.schedule)?; self.frames.push(frame); Ok(executable) @@ -842,7 +844,7 @@ where /// Increments the cached account id and returns the value to be used for the trie_id.
fn next_trie_seed(&mut self) -> u64 { let next = if let Some(current) = self.account_counter { - current + 1 + current.wrapping_add(1) } else { Self::initial_trie_seed() }; @@ -2165,4 +2167,91 @@ mod tests { ); }); } + + #[test] + fn account_counter() { + let fail_code = MockLoader::insert(Constructor, |_, _| exec_trapped()); + let success_code = MockLoader::insert(Constructor, |_, _| exec_success()); + let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| { + ctx.ext + .instantiate(0, fail_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .ok(); + exec_success() + }); + let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| { + let (account_id, _) = ctx + .ext + .instantiate(0, success_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .unwrap(); + + // a plain call should not influence the account counter + ctx.ext.call(0, account_id, 0, vec![], false).unwrap(); + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let schedule = ::Schedule::get(); + let min_balance = ::Currency::minimum_balance(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let fail_executable = + MockExecutable::from_storage(fail_code, &schedule, &mut gas_meter).unwrap(); + let success_executable = + MockExecutable::from_storage(success_code, &schedule, &mut gas_meter).unwrap(); + let succ_fail_executable = + MockExecutable::from_storage(succ_fail_code, &schedule, &mut gas_meter).unwrap(); + let succ_succ_executable = + MockExecutable::from_storage(succ_succ_code, &schedule, &mut gas_meter).unwrap(); + set_balance(&ALICE, min_balance * 1000); + + MockStack::run_instantiate( + ALICE, + fail_executable, + &mut gas_meter, + &schedule, + min_balance * 100, + vec![], + &[], + None, + ) + .ok(); + assert_eq!(>::get(), 0); + + assert_ok!(MockStack::run_instantiate( + ALICE, + success_executable, + &mut gas_meter, + &schedule, + min_balance * 100, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 1); + + assert_ok!(MockStack::run_instantiate( + ALICE, + succ_fail_executable, + &mut gas_meter, + &schedule, + min_balance * 200, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 2); + + assert_ok!(MockStack::run_instantiate( + ALICE, + succ_succ_executable, + &mut gas_meter, + &schedule, + min_balance * 200, + vec![], + &[], + None, + )); + assert_eq!(>::get(), 4); + }); + } } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 1cebcb3b5d9a0..4b6c40764ad0a 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-09-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-10-28, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -151,83 +151,83 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize() -> Weight { - (3_226_000 as Weight) + (2_987_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) // Standard Error: 2_000 - .saturating_add((2_178_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((2_201_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (78_329_000 as Weight) - // Standard Error: 1_000 - .saturating_add((353_000 as Weight).saturating_mul(q as Weight)) + (97_470_000 as Weight) + // Standard Error: 2_000 + .saturating_add((322_000 as Weight).saturating_mul(q as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) fn instrument(c: u32, ) -> Weight { - (37_190_000 as Weight) - // Standard Error: 80_000 - .saturating_add((72_791_000 as Weight).saturating_mul(c as Weight)) + (28_804_000 as Weight) + // Standard Error: 84_000 + .saturating_add((71_838_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:0) fn code_load(c: u32, ) -> Weight { - (6_191_000 as Weight) + (5_658_000 as Weight) // Standard Error: 0 - .saturating_add((1_426_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((1_425_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) fn code_refcount(c: u32, ) -> Weight { - (10_333_000 as Weight) + (9_001_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_275_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_281_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (438_556_000 as Weight) - // Standard Error: 147_000 - .saturating_add((179_307_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 9_000 - .saturating_add((2_159_000 as Weight).saturating_mul(s as Weight)) + (499_349_000 as Weight) + // Standard Error: 199_000 + .saturating_add((174_439_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 13_000 + .saturating_add((2_096_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Contracts CodeStorage 
(r:1 w:1) - // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn instantiate(s: u32, ) -> Weight { - (186_776_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_033_000 as Weight).saturating_mul(s as Weight)) + (181_151_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_025_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (159_247_000 as Weight) + (153_830_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -235,9 +235,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_caller(r: u32, ) -> Weight { - (422_263_000 as Weight) - // Standard Error: 159_000 - .saturating_add((125_490_000 as Weight).saturating_mul(r as Weight)) + (423_222_000 as Weight) + // Standard Error: 169_000 + .saturating_add((114_763_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -245,9 +245,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_address(r: u32, ) -> Weight { - (423_009_000 as Weight) - // Standard Error: 183_000 - .saturating_add((125_795_000 as Weight).saturating_mul(r as Weight)) + (420_731_000 as Weight) + // Standard Error: 165_000 + .saturating_add((115_213_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -255,9 +255,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas_left(r: u32, ) -> Weight { - (429_297_000 as Weight) - // Standard Error: 164_000 - .saturating_add((124_324_000 as Weight).saturating_mul(r as Weight)) + (422_407_000 as Weight) + // Standard Error: 176_000 + .saturating_add((113_935_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -266,9 +266,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:0) fn seal_balance(r: u32, ) -> Weight { - (442_330_000 as Weight) - // Standard Error: 187_000 - .saturating_add((354_665_000 as Weight).saturating_mul(r as Weight)) + (425_698_000 as Weight) + // Standard Error: 210_000 + .saturating_add((335_171_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -276,9 +276,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_value_transferred(r: u32, ) -> Weight { - (411_893_000 as Weight) - // Standard Error: 178_000 - .saturating_add((125_971_000 as Weight).saturating_mul(r as Weight)) + (410_218_000 as Weight) + // Standard Error: 
187_000 + .saturating_add((115_360_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -286,9 +286,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_minimum_balance(r: u32, ) -> Weight { - (413_273_000 as Weight) - // Standard Error: 180_000 - .saturating_add((125_103_000 as Weight).saturating_mul(r as Weight)) + (402_765_000 as Weight) + // Standard Error: 169_000 + .saturating_add((116_553_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -296,9 +296,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_tombstone_deposit(r: u32, ) -> Weight { - (415_613_000 as Weight) - // Standard Error: 192_000 - .saturating_add((126_106_000 as Weight).saturating_mul(r as Weight)) + (404_817_000 as Weight) + // Standard Error: 173_000 + .saturating_add((115_894_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -306,9 +306,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_block_number(r: u32, ) -> Weight { - (414_718_000 as Weight) - // Standard Error: 170_000 - .saturating_add((124_962_000 as Weight).saturating_mul(r as Weight)) + (405_604_000 as Weight) + // Standard Error: 193_000 + .saturating_add((115_757_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -316,9 +316,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_now(r: u32, ) -> Weight { - (419_120_000 as Weight) - // Standard Error: 178_000 - .saturating_add((125_188_000 as Weight).saturating_mul(r as Weight)) + (413_577_000 as Weight) + // Standard Error: 166_000 + .saturating_add((115_115_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -327,9 +327,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) fn seal_weight_to_fee(r: u32, ) -> Weight { - (419_125_000 as Weight) - // Standard Error: 216_000 - .saturating_add((290_592_000 as Weight).saturating_mul(r as Weight)) + (413_932_000 as Weight) + // Standard Error: 201_000 + .saturating_add((272_742_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -337,9 +337,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas(r: u32, ) -> Weight { - (149_609_000 as Weight) - // Standard Error: 117_000 - .saturating_add((56_860_000 as Weight).saturating_mul(r as Weight)) + (144_109_000 as Weight) + // Standard Error: 96_000 + .saturating_add((52_461_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -347,9 +347,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts 
CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input(r: u32, ) -> Weight { - (423_570_000 as Weight) - // Standard Error: 151_000 - .saturating_add((106_985_000 as Weight).saturating_mul(r as Weight)) + (422_584_000 as Weight) + // Standard Error: 158_000 + .saturating_add((98_316_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -357,9 +357,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input_per_kb(n: u32, ) -> Weight { - (566_496_000 as Weight) - // Standard Error: 6_000 - .saturating_add((38_091_000 as Weight).saturating_mul(n as Weight)) + (549_530_000 as Weight) + // Standard Error: 8_000 + .saturating_add((38_025_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -367,9 +367,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return(r: u32, ) -> Weight { - (406_811_000 as Weight) - // Standard Error: 1_833_000 - .saturating_add((6_551_000 as Weight).saturating_mul(r as Weight)) + (403_711_000 as Weight) + // Standard Error: 114_000 + .saturating_add((2_996_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -377,9 +377,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return_per_kb(n: u32, ) -> Weight { - (412_094_000 as Weight) + (408_252_000 as Weight) // Standard Error: 1_000 - .saturating_add((631_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((630_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -389,9 +389,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: System Account (r:2 w:2) fn seal_terminate(r: u32, ) -> Weight { - (415_716_000 as Weight) - // Standard Error: 1_608_000 - .saturating_add((72_648_000 as Weight).saturating_mul(r as Weight)) + (412_619_000 as Weight) + // Standard Error: 896_000 + .saturating_add((66_155_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -402,9 +402,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) fn seal_random(r: u32, ) -> Weight { - (421_387_000 as Weight) - // Standard Error: 275_000 - .saturating_add((393_452_000 as Weight).saturating_mul(r as Weight)) + (416_604_000 as Weight) + // Standard Error: 274_000 + .saturating_add((366_304_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -412,9 +412,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_deposit_event(r: u32, ) -> Weight { - (428_591_000 as Weight) - // Standard Error: 293_000 - .saturating_add((690_833_000 as Weight).saturating_mul(r as Weight)) + (417_326_000 as 
Weight) + // Standard Error: 457_000 + .saturating_add((640_211_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -423,11 +423,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:100 w:100) fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_245_676_000 as Weight) - // Standard Error: 2_636_000 - .saturating_add((484_691_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 519_000 - .saturating_add((165_836_000 as Weight).saturating_mul(n as Weight)) + (1_121_348_000 as Weight) + // Standard Error: 2_483_000 + .saturating_add((463_498_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 489_000 + .saturating_add((167_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -437,17 +437,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_debug_message(r: u32, ) -> Weight { - (162_162_000 as Weight) - // Standard Error: 127_000 - .saturating_add((72_828_000 as Weight).saturating_mul(r as Weight)) + (159_880_000 as Weight) + // Standard Error: 138_000 + .saturating_add((67_837_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_set_storage(r: u32, ) -> Weight { - (399_301_000 as Weight) - // Standard Error: 221_000 - .saturating_add((245_222_000 as Weight).saturating_mul(r as Weight)) + (389_400_000 as Weight) + // Standard Error: 239_000 + .saturating_add((238_933_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) @@ -457,26 +457,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:0 w:1) fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (623_011_000 as Weight) - // Standard Error: 246_000 - .saturating_add((72_051_000 as Weight).saturating_mul(n as Weight)) + (611_980_000 as Weight) + // Standard Error: 234_000 + .saturating_add((72_047_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_clear_storage(r: u32, ) -> Weight { - (445_102_000 as Weight) - // Standard Error: 247_000 - .saturating_add((224_384_000 as Weight).saturating_mul(r as Weight)) + (436_588_000 as Weight) + // Standard Error: 222_000 + .saturating_add((209_734_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) fn seal_get_storage(r: u32, ) -> Weight { - (290_227_000 as Weight) - // Standard Error: 694_000 - .saturating_add((547_193_000 as Weight).saturating_mul(r as Weight)) + (285_689_000 as Weight) + // 
Standard Error: 742_000 + .saturating_add((496_745_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -486,9 +486,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (737_772_000 as Weight) - // Standard Error: 267_000 - .saturating_add((112_216_000 as Weight).saturating_mul(n as Weight)) + (693_967_000 as Weight) + // Standard Error: 226_000 + .saturating_add((111_370_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -497,9 +497,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:101 w:101) fn seal_transfer(r: u32, ) -> Weight { - (383_402_000 as Weight) - // Standard Error: 2_184_000 - .saturating_add((4_335_681_000 as Weight).saturating_mul(r as Weight)) + (332_032_000 as Weight) + // Standard Error: 2_537_000 + .saturating_add((4_071_041_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -510,8 +510,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 11_019_000 - .saturating_add((39_806_777_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 10_806_000 + .saturating_add((39_442_275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -522,13 +522,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:101 w:101) fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (38_662_592_000 as Weight) - // Standard Error: 52_762_000 - .saturating_add((3_888_801_000 as Weight).saturating_mul(t as Weight)) + (38_600_435_000 as Weight) + // Standard Error: 53_014_000 + .saturating_add((3_392_887_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 18_000 - .saturating_add((63_571_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((63_348_000 as Weight).saturating_mul(i as Weight)) // Standard Error: 20_000 - .saturating_add((101_610_000 as Weight).saturating_mul(o as Weight)) + .saturating_add((101_366_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(104 as Weight)) .saturating_add(T::DbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes(101 as Weight)) @@ -540,9 +540,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts AccountCounter (r:1 w:1) // Storage: System Account (r:101 w:101) fn seal_instantiate(r: u32, ) -> Weight { - (626_132_000 as Weight) - // Standard Error: 39_245_000 - .saturating_add((46_398_859_000 as Weight).saturating_mul(r as Weight)) + (643_999_000 as Weight) + // Standard Error: 37_244_000 + .saturating_add((45_559_839_000 as Weight).saturating_mul(r as 
Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) @@ -554,13 +554,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts AccountCounter (r:1 w:1) // Storage: System Account (r:101 w:101) fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (46_649_369_000 as Weight) - // Standard Error: 26_000 - .saturating_add((63_469_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 26_000 - .saturating_add((100_694_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 26_000 - .saturating_add((201_705_000 as Weight).saturating_mul(s as Weight)) + (45_415_035_000 as Weight) + // Standard Error: 30_000 + .saturating_add((63_567_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 30_000 + .saturating_add((100_900_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 30_000 + .saturating_add((201_139_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(206 as Weight)) .saturating_add(T::DbWeight::get().writes(204 as Weight)) } @@ -568,9 +568,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256(r: u32, ) -> Weight { - (417_820_000 as Weight) - // Standard Error: 160_000 - .saturating_add((133_795_000 as Weight).saturating_mul(r as Weight)) + (417_335_000 as Weight) + // Standard Error: 174_000 + .saturating_add((126_268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -578,9 +578,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (609_012_000 as Weight) - // Standard Error: 23_000 - .saturating_add((499_227_000 as Weight).saturating_mul(n as Weight)) + (700_565_000 as Weight) + // Standard Error: 68_000 + .saturating_add((499_898_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -588,9 +588,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256(r: u32, ) -> Weight { - (419_043_000 as Weight) - // Standard Error: 177_000 - .saturating_add((140_704_000 as Weight).saturating_mul(r as Weight)) + (416_014_000 as Weight) + // Standard Error: 168_000 + .saturating_add((134_320_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -598,9 +598,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (564_451_000 as Weight) + (534_466_000 as Weight) // Standard Error: 19_000 - .saturating_add((346_948_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((346_588_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -608,9 +608,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256(r: u32, ) -> 
Weight { - (420_951_000 as Weight) - // Standard Error: 163_000 - .saturating_add((113_596_000 as Weight).saturating_mul(r as Weight)) + (414_278_000 as Weight) + // Standard Error: 164_000 + .saturating_add((106_210_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -618,9 +618,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (563_168_000 as Weight) - // Standard Error: 17_000 - .saturating_add((164_114_000 as Weight).saturating_mul(n as Weight)) + (569_659_000 as Weight) + // Standard Error: 16_000 + .saturating_add((163_989_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -628,9 +628,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128(r: u32, ) -> Weight { - (418_794_000 as Weight) - // Standard Error: 167_000 - .saturating_add((113_205_000 as Weight).saturating_mul(r as Weight)) + (421_251_000 as Weight) + // Standard Error: 166_000 + .saturating_add((104_678_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -638,9 +638,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (584_668_000 as Weight) - // Standard Error: 15_000 - .saturating_add((164_127_000 as Weight).saturating_mul(n as Weight)) + (568_490_000 as Weight) + // Standard Error: 21_000 + .saturating_add((163_999_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -648,264 +648,264 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_ecdsa_recover(r: u32, ) -> Weight { - (435_443_000 as Weight) - // Standard Error: 1_408_000 - .saturating_add((15_624_877_000 as Weight).saturating_mul(r as Weight)) + (361_122_000 as Weight) + // Standard Error: 1_172_000 + .saturating_add((15_591_590_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (45_937_000 as Weight) + (46_003_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_185_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (44_001_000 as Weight) - // Standard Error: 11_000 - .saturating_add((2_412_000 as Weight).saturating_mul(r as Weight)) + (42_908_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_570_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (43_157_000 as Weight) - // Standard Error: 12_000 - .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + (42_739_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_791_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (48_475_000 as Weight) + (47_543_000 as Weight) // Standard Error: 8_000 - 
.saturating_add((2_604_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_834_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (50_649_000 as Weight) - // Standard Error: 12_000 - .saturating_add((2_553_000 as Weight).saturating_mul(r as Weight)) + (50_540_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_663_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (48_433_000 as Weight) + (47_732_000 as Weight) // Standard Error: 8_000 - .saturating_add((1_670_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_771_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (49_244_000 as Weight) - // Standard Error: 16_000 - .saturating_add((1_946_000 as Weight).saturating_mul(r as Weight)) + (49_005_000 as Weight) + // Standard Error: 17_000 + .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (46_117_000 as Weight) + (45_975_000 as Weight) // Standard Error: 17_000 - .saturating_add((2_387_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_492_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(_e: u32, ) -> Weight { - (55_204_000 as Weight) + (55_461_000 as Weight) } fn instr_call(r: u32, ) -> Weight { - (43_651_000 as Weight) - // Standard Error: 26_000 - .saturating_add((19_163_000 as Weight).saturating_mul(r as Weight)) + (41_932_000 as Weight) + // Standard Error: 29_000 + .saturating_add((19_800_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (54_063_000 as Weight) - // Standard Error: 32_000 - .saturating_add((27_970_000 as Weight).saturating_mul(r as Weight)) + (56_550_000 as Weight) + // Standard Error: 34_000 + .saturating_add((28_414_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (88_527_000 as Weight) + (93_172_000 as Weight) // Standard Error: 6_000 - .saturating_add((958_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((1_018_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (55_066_000 as Weight) - // Standard Error: 12_000 - .saturating_add((682_000 as Weight).saturating_mul(r as Weight)) + (54_603_000 as Weight) + // Standard Error: 14_000 + .saturating_add((764_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (55_298_000 as Weight) - // Standard Error: 13_000 - .saturating_add((778_000 as Weight).saturating_mul(r as Weight)) + (54_763_000 as Weight) + // Standard Error: 14_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (56_302_000 as Weight) + (56_137_000 as Weight) // Standard Error: 11_000 - .saturating_add((1_079_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_194_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (71_567_000 as Weight) + (69_513_000 as Weight) // Standard Error: 11_000 - .saturating_add((1_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_125_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (71_186_000 as Weight) + (69_120_000 as Weight) // Standard Error: 12_000 - .saturating_add((1_151_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_215_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (46_240_000 
as Weight) + (46_021_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_044_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_103_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (52_369_000 as Weight) - // Standard Error: 2_508_000 - .saturating_add((615_448_000 as Weight).saturating_mul(r as Weight)) + (52_245_000 as Weight) + // Standard Error: 4_119_000 + .saturating_add((619_498_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (47_623_000 as Weight) + (47_314_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_720_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (47_670_000 as Weight) + (47_855_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_701_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (47_508_000 as Weight) - // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + (47_704_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_708_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (48_109_000 as Weight) + (47_656_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_580_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_705_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (55_270_000 as Weight) - // Standard Error: 9_000 - .saturating_add((1_102_000 as Weight).saturating_mul(r as Weight)) + (55_202_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_229_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (55_093_000 as Weight) + (55_193_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_223_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (48_265_000 as Weight) + (48_125_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_573_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_704_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (48_733_000 as Weight) + (49_162_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_088_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_241_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (48_831_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_085_000 as Weight).saturating_mul(r as Weight)) + (48_635_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (49_147_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_056_000 as Weight).saturating_mul(r as Weight)) + (48_550_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_267_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (49_596_000 as Weight) + (49_135_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_049_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_219_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (49_872_000 as Weight) + (49_638_000 as Weight) // Standard Error: 7_000 - 
.saturating_add((2_038_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (48_843_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_081_000 as Weight).saturating_mul(r as Weight)) + (49_889_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_201_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (48_765_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + (49_763_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_210_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (48_720_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_083_000 as Weight).saturating_mul(r as Weight)) + (49_607_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_207_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (48_736_000 as Weight) - // Standard Error: 7_000 - .saturating_add((2_097_000 as Weight).saturating_mul(r as Weight)) + (49_664_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_213_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (48_772_000 as Weight) + (49_718_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (48_827_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_082_000 as Weight).saturating_mul(r as Weight)) + (49_513_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_208_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (48_961_000 as Weight) + (49_837_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_201_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (49_069_000 as Weight) + (49_684_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_067_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_210_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (49_035_000 as Weight) + (48_749_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_872_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (48_842_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_449_000 as Weight).saturating_mul(r as Weight)) + (49_134_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_630_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (48_536_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_723_000 as Weight).saturating_mul(r as Weight)) + (48_981_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_861_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (48_851_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_432_000 as Weight).saturating_mul(r as Weight)) + (49_195_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_593_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (48_624_000 as Weight) - // Standard Error: 7_000 - .saturating_add((2_093_000 as 
Weight).saturating_mul(r as Weight)) + (49_304_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_238_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (49_348_000 as Weight) - // Standard Error: 8_000 - .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + (48_636_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_259_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (49_112_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_055_000 as Weight).saturating_mul(r as Weight)) + (48_761_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (49_654_000 as Weight) + (48_492_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_051_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_263_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (48_848_000 as Weight) + (48_736_000 as Weight) // Standard Error: 8_000 - .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_256_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (49_455_000 as Weight) + (48_675_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_054_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_256_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (49_640_000 as Weight) + (48_703_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_048_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_257_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (49_498_000 as Weight) + (48_758_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_068_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_259_000 as Weight).saturating_mul(r as Weight)) } } @@ -913,83 +913,83 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize() -> Weight { - (3_226_000 as Weight) + (2_987_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn on_initialize_per_trie_key(k: u32, ) -> Weight { (0 as Weight) // Standard Error: 2_000 - .saturating_add((2_178_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((2_201_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } // Storage: Contracts DeletionQueue (r:1 w:0) fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (78_329_000 as Weight) - // Standard Error: 1_000 - .saturating_add((353_000 as Weight).saturating_mul(q as Weight)) + (97_470_000 as Weight) + // Standard Error: 2_000 + .saturating_add((322_000 as Weight).saturating_mul(q as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) fn instrument(c: u32, ) -> Weight { - (37_190_000 as Weight) - // Standard Error: 80_000 - .saturating_add((72_791_000 as Weight).saturating_mul(c as Weight)) + (28_804_000 as Weight) + // Standard Error: 84_000 + .saturating_add((71_838_000 as Weight).saturating_mul(c as 
Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:0) fn code_load(c: u32, ) -> Weight { - (6_191_000 as Weight) + (5_658_000 as Weight) // Standard Error: 0 - .saturating_add((1_426_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((1_425_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) fn code_refcount(c: u32, ) -> Weight { - (10_333_000 as Weight) + (9_001_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_275_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_281_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } - // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (438_556_000 as Weight) - // Standard Error: 147_000 - .saturating_add((179_307_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 9_000 - .saturating_add((2_159_000 as Weight).saturating_mul(s as Weight)) + (499_349_000 as Weight) + // Standard Error: 199_000 + .saturating_add((174_439_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 13_000 + .saturating_add((2_096_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Contracts CodeStorage (r:1 w:1) - // Storage: Contracts AccountCounter (r:1 w:0) + // Storage: Contracts AccountCounter (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn instantiate(s: u32, ) -> Weight { - (186_776_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_033_000 as Weight).saturating_mul(s as Weight)) + (181_151_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_025_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (159_247_000 as Weight) + (153_830_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -997,9 +997,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_caller(r: u32, ) -> Weight { - (422_263_000 as Weight) - // Standard Error: 159_000 - .saturating_add((125_490_000 as Weight).saturating_mul(r as Weight)) + (423_222_000 as Weight) + // Standard Error: 169_000 + .saturating_add((114_763_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1007,9 +1007,9 @@ impl WeightInfo for () { // Storage: Contracts 
CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_address(r: u32, ) -> Weight { - (423_009_000 as Weight) - // Standard Error: 183_000 - .saturating_add((125_795_000 as Weight).saturating_mul(r as Weight)) + (420_731_000 as Weight) + // Standard Error: 165_000 + .saturating_add((115_213_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1017,9 +1017,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas_left(r: u32, ) -> Weight { - (429_297_000 as Weight) - // Standard Error: 164_000 - .saturating_add((124_324_000 as Weight).saturating_mul(r as Weight)) + (422_407_000 as Weight) + // Standard Error: 176_000 + .saturating_add((113_935_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1028,9 +1028,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:0) fn seal_balance(r: u32, ) -> Weight { - (442_330_000 as Weight) - // Standard Error: 187_000 - .saturating_add((354_665_000 as Weight).saturating_mul(r as Weight)) + (425_698_000 as Weight) + // Standard Error: 210_000 + .saturating_add((335_171_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1038,9 +1038,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_value_transferred(r: u32, ) -> Weight { - (411_893_000 as Weight) - // Standard Error: 178_000 - .saturating_add((125_971_000 as Weight).saturating_mul(r as Weight)) + (410_218_000 as Weight) + // Standard Error: 187_000 + .saturating_add((115_360_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1048,9 +1048,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_minimum_balance(r: u32, ) -> Weight { - (413_273_000 as Weight) - // Standard Error: 180_000 - .saturating_add((125_103_000 as Weight).saturating_mul(r as Weight)) + (402_765_000 as Weight) + // Standard Error: 169_000 + .saturating_add((116_553_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1058,9 +1058,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_tombstone_deposit(r: u32, ) -> Weight { - (415_613_000 as Weight) - // Standard Error: 192_000 - .saturating_add((126_106_000 as Weight).saturating_mul(r as Weight)) + (404_817_000 as Weight) + // Standard Error: 173_000 + .saturating_add((115_894_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1068,9 +1068,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_block_number(r: u32, ) -> Weight { - (414_718_000 as Weight) - // Standard Error: 170_000 - .saturating_add((124_962_000 as Weight).saturating_mul(r as Weight)) + (405_604_000 as Weight) + // Standard Error: 193_000 + .saturating_add((115_757_000 as 
Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1078,9 +1078,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_now(r: u32, ) -> Weight { - (419_120_000 as Weight) - // Standard Error: 178_000 - .saturating_add((125_188_000 as Weight).saturating_mul(r as Weight)) + (413_577_000 as Weight) + // Standard Error: 166_000 + .saturating_add((115_115_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1089,9 +1089,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) fn seal_weight_to_fee(r: u32, ) -> Weight { - (419_125_000 as Weight) - // Standard Error: 216_000 - .saturating_add((290_592_000 as Weight).saturating_mul(r as Weight)) + (413_932_000 as Weight) + // Standard Error: 201_000 + .saturating_add((272_742_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1099,9 +1099,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_gas(r: u32, ) -> Weight { - (149_609_000 as Weight) - // Standard Error: 117_000 - .saturating_add((56_860_000 as Weight).saturating_mul(r as Weight)) + (144_109_000 as Weight) + // Standard Error: 96_000 + .saturating_add((52_461_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1109,9 +1109,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input(r: u32, ) -> Weight { - (423_570_000 as Weight) - // Standard Error: 151_000 - .saturating_add((106_985_000 as Weight).saturating_mul(r as Weight)) + (422_584_000 as Weight) + // Standard Error: 158_000 + .saturating_add((98_316_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1119,9 +1119,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_input_per_kb(n: u32, ) -> Weight { - (566_496_000 as Weight) - // Standard Error: 6_000 - .saturating_add((38_091_000 as Weight).saturating_mul(n as Weight)) + (549_530_000 as Weight) + // Standard Error: 8_000 + .saturating_add((38_025_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1129,9 +1129,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return(r: u32, ) -> Weight { - (406_811_000 as Weight) - // Standard Error: 1_833_000 - .saturating_add((6_551_000 as Weight).saturating_mul(r as Weight)) + (403_711_000 as Weight) + // Standard Error: 114_000 + .saturating_add((2_996_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1139,9 +1139,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_return_per_kb(n: u32, ) -> Weight { - (412_094_000 as 
Weight) + (408_252_000 as Weight) // Standard Error: 1_000 - .saturating_add((631_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((630_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1151,9 +1151,9 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: System Account (r:2 w:2) fn seal_terminate(r: u32, ) -> Weight { - (415_716_000 as Weight) - // Standard Error: 1_608_000 - .saturating_add((72_648_000 as Weight).saturating_mul(r as Weight)) + (412_619_000 as Weight) + // Standard Error: 896_000 + .saturating_add((66_155_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1164,9 +1164,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) fn seal_random(r: u32, ) -> Weight { - (421_387_000 as Weight) - // Standard Error: 275_000 - .saturating_add((393_452_000 as Weight).saturating_mul(r as Weight)) + (416_604_000 as Weight) + // Standard Error: 274_000 + .saturating_add((366_304_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1174,9 +1174,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_deposit_event(r: u32, ) -> Weight { - (428_591_000 as Weight) - // Standard Error: 293_000 - .saturating_add((690_833_000 as Weight).saturating_mul(r as Weight)) + (417_326_000 as Weight) + // Standard Error: 457_000 + .saturating_add((640_211_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1185,11 +1185,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:100 w:100) fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_245_676_000 as Weight) - // Standard Error: 2_636_000 - .saturating_add((484_691_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 519_000 - .saturating_add((165_836_000 as Weight).saturating_mul(n as Weight)) + (1_121_348_000 as Weight) + // Standard Error: 2_483_000 + .saturating_add((463_498_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 489_000 + .saturating_add((167_147_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1199,17 +1199,17 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_debug_message(r: u32, ) -> Weight { - (162_162_000 as Weight) - // Standard Error: 127_000 - .saturating_add((72_828_000 as Weight).saturating_mul(r as Weight)) + (159_880_000 as Weight) + // Standard Error: 138_000 + .saturating_add((67_837_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_set_storage(r: u32, ) -> Weight { - (399_301_000 as Weight) - // Standard 
Error: 221_000 - .saturating_add((245_222_000 as Weight).saturating_mul(r as Weight)) + (389_400_000 as Weight) + // Standard Error: 239_000 + .saturating_add((238_933_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) @@ -1219,26 +1219,26 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:0 w:1) fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (623_011_000 as Weight) - // Standard Error: 246_000 - .saturating_add((72_051_000 as Weight).saturating_mul(n as Weight)) + (611_980_000 as Weight) + // Standard Error: 234_000 + .saturating_add((72_047_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Skipped Metadata (r:0 w:0) fn seal_clear_storage(r: u32, ) -> Weight { - (445_102_000 as Weight) - // Standard Error: 247_000 - .saturating_add((224_384_000 as Weight).saturating_mul(r as Weight)) + (436_588_000 as Weight) + // Standard Error: 222_000 + .saturating_add((209_734_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } // Storage: Skipped Metadata (r:0 w:0) fn seal_get_storage(r: u32, ) -> Weight { - (290_227_000 as Weight) - // Standard Error: 694_000 - .saturating_add((547_193_000 as Weight).saturating_mul(r as Weight)) + (285_689_000 as Weight) + // Standard Error: 742_000 + .saturating_add((496_745_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1248,9 +1248,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: unknown [0x7afa01283080ef247df84e0ba38ea5a587d25ce6633a6bfbba02068c14023441] (r:1 w:0) fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (737_772_000 as Weight) - // Standard Error: 267_000 - .saturating_add((112_216_000 as Weight).saturating_mul(n as Weight)) + (693_967_000 as Weight) + // Standard Error: 226_000 + .saturating_add((111_370_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1259,9 +1259,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:101 w:101) fn seal_transfer(r: u32, ) -> Weight { - (383_402_000 as Weight) - // Standard Error: 2_184_000 - .saturating_add((4_335_681_000 as Weight).saturating_mul(r as Weight)) + (332_032_000 as Weight) + // Standard Error: 2_537_000 + .saturating_add((4_071_041_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -1272,8 +1272,8 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 11_019_000 - .saturating_add((39_806_777_000 as Weight).saturating_mul(r as 
Weight)) + // Standard Error: 10_806_000 + .saturating_add((39_442_275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -1284,13 +1284,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:101 w:101) fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (38_662_592_000 as Weight) - // Standard Error: 52_762_000 - .saturating_add((3_888_801_000 as Weight).saturating_mul(t as Weight)) + (38_600_435_000 as Weight) + // Standard Error: 53_014_000 + .saturating_add((3_392_887_000 as Weight).saturating_mul(t as Weight)) // Standard Error: 18_000 - .saturating_add((63_571_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((63_348_000 as Weight).saturating_mul(i as Weight)) // Standard Error: 20_000 - .saturating_add((101_610_000 as Weight).saturating_mul(o as Weight)) + .saturating_add((101_366_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(104 as Weight)) .saturating_add(RocksDbWeight::get().reads((101 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes(101 as Weight)) @@ -1302,9 +1302,9 @@ impl WeightInfo for () { // Storage: Contracts AccountCounter (r:1 w:1) // Storage: System Account (r:101 w:101) fn seal_instantiate(r: u32, ) -> Weight { - (626_132_000 as Weight) - // Standard Error: 39_245_000 - .saturating_add((46_398_859_000 as Weight).saturating_mul(r as Weight)) + (643_999_000 as Weight) + // Standard Error: 37_244_000 + .saturating_add((45_559_839_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) @@ -1316,13 +1316,13 @@ impl WeightInfo for () { // Storage: Contracts AccountCounter (r:1 w:1) // Storage: System Account (r:101 w:101) fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (46_649_369_000 as Weight) - // Standard Error: 26_000 - .saturating_add((63_469_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 26_000 - .saturating_add((100_694_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 26_000 - .saturating_add((201_705_000 as Weight).saturating_mul(s as Weight)) + (45_415_035_000 as Weight) + // Standard Error: 30_000 + .saturating_add((63_567_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 30_000 + .saturating_add((100_900_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 30_000 + .saturating_add((201_139_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(206 as Weight)) .saturating_add(RocksDbWeight::get().writes(204 as Weight)) } @@ -1330,9 +1330,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256(r: u32, ) -> Weight { - (417_820_000 as Weight) - // Standard Error: 160_000 - .saturating_add((133_795_000 as Weight).saturating_mul(r as Weight)) + (417_335_000 as Weight) + // Standard Error: 174_000 + .saturating_add((126_268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1340,9 +1340,9 @@ impl WeightInfo for 
() { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (609_012_000 as Weight) - // Standard Error: 23_000 - .saturating_add((499_227_000 as Weight).saturating_mul(n as Weight)) + (700_565_000 as Weight) + // Standard Error: 68_000 + .saturating_add((499_898_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1350,9 +1350,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256(r: u32, ) -> Weight { - (419_043_000 as Weight) - // Standard Error: 177_000 - .saturating_add((140_704_000 as Weight).saturating_mul(r as Weight)) + (416_014_000 as Weight) + // Standard Error: 168_000 + .saturating_add((134_320_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1360,9 +1360,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (564_451_000 as Weight) + (534_466_000 as Weight) // Standard Error: 19_000 - .saturating_add((346_948_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((346_588_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1370,9 +1370,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256(r: u32, ) -> Weight { - (420_951_000 as Weight) - // Standard Error: 163_000 - .saturating_add((113_596_000 as Weight).saturating_mul(r as Weight)) + (414_278_000 as Weight) + // Standard Error: 164_000 + .saturating_add((106_210_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1380,9 +1380,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (563_168_000 as Weight) - // Standard Error: 17_000 - .saturating_add((164_114_000 as Weight).saturating_mul(n as Weight)) + (569_659_000 as Weight) + // Standard Error: 16_000 + .saturating_add((163_989_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1390,9 +1390,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128(r: u32, ) -> Weight { - (418_794_000 as Weight) - // Standard Error: 167_000 - .saturating_add((113_205_000 as Weight).saturating_mul(r as Weight)) + (421_251_000 as Weight) + // Standard Error: 166_000 + .saturating_add((104_678_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1400,9 +1400,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (584_668_000 as Weight) - // Standard Error: 15_000 - .saturating_add((164_127_000 as Weight).saturating_mul(n as Weight)) + (568_490_000 as Weight) + // Standard Error: 21_000 + 
.saturating_add((163_999_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -1410,263 +1410,263 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) fn seal_ecdsa_recover(r: u32, ) -> Weight { - (435_443_000 as Weight) - // Standard Error: 1_408_000 - .saturating_add((15_624_877_000 as Weight).saturating_mul(r as Weight)) + (361_122_000 as Weight) + // Standard Error: 1_172_000 + .saturating_add((15_591_590_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (45_937_000 as Weight) + (46_003_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_185_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (44_001_000 as Weight) - // Standard Error: 11_000 - .saturating_add((2_412_000 as Weight).saturating_mul(r as Weight)) + (42_908_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_570_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (43_157_000 as Weight) - // Standard Error: 12_000 - .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + (42_739_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_791_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (48_475_000 as Weight) + (47_543_000 as Weight) // Standard Error: 8_000 - .saturating_add((2_604_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_834_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (50_649_000 as Weight) - // Standard Error: 12_000 - .saturating_add((2_553_000 as Weight).saturating_mul(r as Weight)) + (50_540_000 as Weight) + // Standard Error: 13_000 + .saturating_add((2_663_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (48_433_000 as Weight) + (47_732_000 as Weight) // Standard Error: 8_000 - .saturating_add((1_670_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_771_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (49_244_000 as Weight) - // Standard Error: 16_000 - .saturating_add((1_946_000 as Weight).saturating_mul(r as Weight)) + (49_005_000 as Weight) + // Standard Error: 17_000 + .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (46_117_000 as Weight) + (45_975_000 as Weight) // Standard Error: 17_000 - .saturating_add((2_387_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_492_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(_e: u32, ) -> Weight { - (55_204_000 as Weight) + (55_461_000 as Weight) } fn instr_call(r: u32, ) -> Weight { - (43_651_000 as Weight) - // Standard Error: 26_000 - .saturating_add((19_163_000 as Weight).saturating_mul(r as Weight)) + (41_932_000 as Weight) + // Standard Error: 29_000 + .saturating_add((19_800_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (54_063_000 as Weight) - // Standard Error: 32_000 - .saturating_add((27_970_000 as Weight).saturating_mul(r as Weight)) + (56_550_000 as Weight) + // Standard Error: 34_000 + .saturating_add((28_414_000 as Weight).saturating_mul(r 
as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (88_527_000 as Weight) + (93_172_000 as Weight) // Standard Error: 6_000 - .saturating_add((958_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((1_018_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (55_066_000 as Weight) - // Standard Error: 12_000 - .saturating_add((682_000 as Weight).saturating_mul(r as Weight)) + (54_603_000 as Weight) + // Standard Error: 14_000 + .saturating_add((764_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (55_298_000 as Weight) - // Standard Error: 13_000 - .saturating_add((778_000 as Weight).saturating_mul(r as Weight)) + (54_763_000 as Weight) + // Standard Error: 14_000 + .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (56_302_000 as Weight) + (56_137_000 as Weight) // Standard Error: 11_000 - .saturating_add((1_079_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_194_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (71_567_000 as Weight) + (69_513_000 as Weight) // Standard Error: 11_000 - .saturating_add((1_107_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_125_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (71_186_000 as Weight) + (69_120_000 as Weight) // Standard Error: 12_000 - .saturating_add((1_151_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_215_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (46_240_000 as Weight) + (46_021_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_044_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_103_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (52_369_000 as Weight) - // Standard Error: 2_508_000 - .saturating_add((615_448_000 as Weight).saturating_mul(r as Weight)) + (52_245_000 as Weight) + // Standard Error: 4_119_000 + .saturating_add((619_498_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (47_623_000 as Weight) + (47_314_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_720_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (47_670_000 as Weight) + (47_855_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_701_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (47_508_000 as Weight) - // Standard Error: 9_000 - .saturating_add((1_583_000 as Weight).saturating_mul(r as Weight)) + (47_704_000 as Weight) + // Standard Error: 10_000 + .saturating_add((1_708_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (48_109_000 as Weight) + (47_656_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_580_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_705_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (55_270_000 as Weight) - // Standard Error: 9_000 - .saturating_add((1_102_000 as Weight).saturating_mul(r as Weight)) + (55_202_000 as Weight) + // Standard Error: 11_000 + .saturating_add((1_229_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> 
Weight { - (55_093_000 as Weight) + (55_193_000 as Weight) // Standard Error: 9_000 - .saturating_add((1_108_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_223_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (48_265_000 as Weight) + (48_125_000 as Weight) // Standard Error: 10_000 - .saturating_add((1_573_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((1_704_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (48_733_000 as Weight) + (49_162_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_088_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_241_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (48_831_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_085_000 as Weight).saturating_mul(r as Weight)) + (48_635_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (49_147_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_056_000 as Weight).saturating_mul(r as Weight)) + (48_550_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_267_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (49_596_000 as Weight) + (49_135_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_049_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_219_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (49_872_000 as Weight) + (49_638_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_038_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (48_843_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_081_000 as Weight).saturating_mul(r as Weight)) + (49_889_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_201_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (48_765_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + (49_763_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_210_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (48_720_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_083_000 as Weight).saturating_mul(r as Weight)) + (49_607_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_207_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (48_736_000 as Weight) - // Standard Error: 7_000 - .saturating_add((2_097_000 as Weight).saturating_mul(r as Weight)) + (49_664_000 as Weight) + // Standard Error: 9_000 + .saturating_add((2_213_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (48_772_000 as Weight) + (49_718_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_206_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (48_827_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_082_000 as Weight).saturating_mul(r as Weight)) + (49_513_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_208_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (48_961_000 as Weight) + (49_837_000 as Weight) 
// Standard Error: 7_000 - .saturating_add((2_072_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_201_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (49_069_000 as Weight) + (49_684_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_067_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_210_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (49_035_000 as Weight) + (48_749_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_677_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_872_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (48_842_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_449_000 as Weight).saturating_mul(r as Weight)) + (49_134_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_630_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (48_536_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_723_000 as Weight).saturating_mul(r as Weight)) + (48_981_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_861_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (48_851_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_432_000 as Weight).saturating_mul(r as Weight)) + (49_195_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_593_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (48_624_000 as Weight) - // Standard Error: 7_000 - .saturating_add((2_093_000 as Weight).saturating_mul(r as Weight)) + (49_304_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_238_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (49_348_000 as Weight) - // Standard Error: 8_000 - .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + (48_636_000 as Weight) + // Standard Error: 7_000 + .saturating_add((2_259_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (49_112_000 as Weight) - // Standard Error: 6_000 - .saturating_add((2_055_000 as Weight).saturating_mul(r as Weight)) + (48_761_000 as Weight) + // Standard Error: 8_000 + .saturating_add((2_262_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (49_654_000 as Weight) + (48_492_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_051_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_263_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (48_848_000 as Weight) + (48_736_000 as Weight) // Standard Error: 8_000 - .saturating_add((2_089_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_256_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (49_455_000 as Weight) + (48_675_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_054_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_256_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (49_640_000 as Weight) + (48_703_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_048_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((2_257_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (49_498_000 as Weight) + (48_758_000 as Weight) // Standard Error: 7_000 - .saturating_add((2_068_000 as Weight).saturating_mul(r as 
Weight))
+		.saturating_add((2_259_000 as Weight).saturating_mul(r as Weight))
 	}
 }

From 1d818f38a3198f21b4ae68bac3dc98219fccc669 Mon Sep 17 00:00:00 2001
From: Squirrel
Date: Sat, 30 Oct 2021 14:38:27 +0200
Subject: [PATCH 023/162] Removal of light client from substrate (#9684)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Removal of light client from substrate

* add missing import

* These tests relate to there being light and non-light clients.

* removing light nodes from test

* cargo fmt

* not needed

* LightDataChecker not needed any longer

* cargo fmt

* Update client/service/test/src/lib.rs

Co-authored-by: Bastian Köcher

* Update client/service/test/src/lib.rs

Co-authored-by: Bastian Köcher

* cargo fmt

Co-authored-by: Bastian Köcher
---
 Cargo.lock                                  |   1 -
 bin/node-template/node/src/command.rs       |   8 +-
 bin/node-template/node/src/service.rs       | 143 +--
 bin/node/cli/src/chain_spec.rs              |  34 +-
 bin/node/cli/src/command.rs                 |   8 +-
 bin/node/cli/src/service.rs                 | 204 +---
 .../tests/database_role_subdir_migration.rs | 116 ---
 client/light/src/blockchain.rs              |   2 +-
 client/light/src/fetcher.rs                 | 366 -------
 client/light/src/lib.rs                     |  20 +-
 client/rpc/src/state/mod.rs                 |  41 -
 client/rpc/src/state/state_light.rs         | 873 ----------------
 client/service/Cargo.toml                   |   1 -
 client/service/src/builder.rs               | 136 +--
 client/service/src/client/client.rs         |   2 +-
 client/service/src/client/light.rs          |  82 --
 client/service/src/client/mod.rs            |   1 -
 client/service/src/lib.rs                   |   7 +-
 client/service/test/src/client/db.rs        |   1 +
 client/service/test/src/client/light.rs     | 981 ------------------
 client/service/test/src/client/mod.rs       |   3 -
 client/service/test/src/lib.rs              | 194 +---
 22 files changed, 83 insertions(+), 3141 deletions(-)
 delete mode 100644 bin/node/cli/tests/database_role_subdir_migration.rs
 delete mode 100644 client/light/src/fetcher.rs
 delete mode 100644 client/rpc/src/state/state_light.rs
 delete mode 100644 client/service/src/client/light.rs
 delete mode 100644 client/service/test/src/client/light.rs

diff --git a/Cargo.lock b/Cargo.lock
index ed10002bd5489..c3cfaec19532e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8329,7 +8329,6 @@ dependencies = [
  "sc-executor",
  "sc-informant",
  "sc-keystore",
- "sc-light",
  "sc-network",
  "sc-offchain",
  "sc-rpc",
diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs
index e948c3f53b716..e1cfeaeb801e3 100644
--- a/bin/node-template/node/src/command.rs
+++ b/bin/node-template/node/src/command.rs
@@ -4,7 +4,7 @@ use crate::{
 	service,
 };
 use node_template_runtime::Block;
-use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
+use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
 
 impl SubstrateCli for Cli {
@@ -111,11 +111,7 @@ pub fn run() -> sc_cli::Result<()> {
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
-				match config.role {
-					Role::Light => service::new_light(config),
-					_ => service::new_full(config),
-				}
-				.map_err(sc_cli::Error::Service)
+				service::new_full(config).map_err(sc_cli::Error::Service)
			})
		},
	}
diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs
index 2286ad3bd654f..d673a54a94882 100644
--- a/bin/node-template/node/src/service.rs
+++ b/bin/node-template/node/src/service.rs
@@ -1,7 +1,7 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_client_api::ExecutorProvider; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; @@ -336,144 +336,3 @@ pub fn new_full(mut config: Configuration) -> Result network_starter.start_network(); Ok(task_manager) } - -/// Builds a new service for a light client. -pub fn new_light(mut config: Configuration) -> Result { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - - let mut telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); - telemetry - }); - - config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain.clone(), - telemetry.as_ref().map(|x| x.handle()), - )?; - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - - let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import.clone())), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::NeverCanAuthor, - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; - - let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - grandpa_link.shared_authority_set().clone(), - Vec::default(), - )); - - let (network, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - warp_sync: Some(warp_sync), - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let enable_grandpa = !config.disable_grandpa; - if 
enable_grandpa { - let name = config.network.node_name.clone(); - - let config = sc_finality_grandpa::Config { - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore: None, - local_role: config.role.clone(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - task_manager.spawn_handle().spawn_blocking( - "grandpa-observer", - sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, - ); - } - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| Ok(())), - config, - client, - keystore: keystore_container.sync_keystore(), - backend, - network, - system_rpc_tx, - telemetry: telemetry.as_mut(), - })?; - - network_starter.start_network(); - Ok(task_manager) -} diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index b5e36d9b53629..8499c66e0c9dc 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -418,7 +418,7 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::service::{new_full_base, new_light_base, NewFullBase}; + use crate::service::{new_full_base, NewFullBase}; use sc_service_test; use sp_runtime::BuildStorage; @@ -466,28 +466,16 @@ pub(crate) mod tests { fn test_connectivity() { sp_tracing::try_init_simple(); - sc_service_test::connectivity( - integration_test_config_with_two_authorities(), - |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } = - new_full_base(config, |_, _| ())?; - Ok(sc_service_test::TestNetComponents::new( - task_manager, - client, - network, - transaction_pool, - )) - }, - |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new( - keep_alive, - client, - network, - transaction_pool, - )) - }, - ); + sc_service_test::connectivity(integration_test_config_with_two_authorities(), |config| { + let NewFullBase { task_manager, client, network, transaction_pool, .. 
} = + new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new( + task_manager, + client, + network, + transaction_pool, + )) + }); } #[test] diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 17375094f2a1b..dd8202eb71aac 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -19,7 +19,7 @@ use crate::{chain_spec, service, service::new_partial, Cli, Subcommand}; use node_executor::ExecutorDispatch; use node_runtime::{Block, RuntimeApi}; -use sc_cli::{ChainSpec, Result, Role, RuntimeVersion, SubstrateCli}; +use sc_cli::{ChainSpec, Result, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; impl SubstrateCli for Cli { @@ -77,11 +77,7 @@ pub fn run() -> Result<()> { None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - .map_err(sc_cli::Error::Service) + service::new_full(config).map_err(sc_cli::Error::Service) }) }, Some(Subcommand::Inspect(cmd)) => { diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 938f359368181..2220614ebaf2a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -26,11 +26,11 @@ use futures::prelude::*; use node_executor::ExecutorDispatch; use node_primitives::Block; use node_runtime::RuntimeApi; -use sc_client_api::{BlockBackend, ExecutorProvider, RemoteBackend}; +use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::{Event, NetworkService}; -use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; +use sc_service::{config::Configuration, error::Error as ServiceError, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; @@ -44,8 +44,7 @@ type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; -type LightClient = - sc_service::TLightClient>; + /// The transaction pool type defintion. pub type TransactionPool = sc_transaction_pool::FullPool; @@ -516,186 +515,9 @@ pub fn new_full(config: Configuration) -> Result { new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. }| task_manager) } -/// Creates a light service from the configuration. 
-pub fn new_light_base( - mut config: Configuration, -) -> Result< - ( - TaskManager, - RpcHandlers, - Arc, - Arc::Hash>>, - Arc< - sc_transaction_pool::LightPool>, - >, - ), - ServiceError, -> { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - - let mut telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); - telemetry - }); - - config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain.clone(), - telemetry.as_ref().map(|x| x.handle()), - )?; - let justification_import = grandpa_block_import.clone(); - - let (babe_block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let slot_duration = babe_link.config().slot_duration(); - let import_queue = sc_consensus_babe::import_queue( - babe_link, - babe_block_import, - Some(Box::new(justification_import)), - client.clone(), - select_chain, - move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - - let uncles = - sp_authorship::InherentDataProvider::<::Header>::check_inherents(); - - Ok((timestamp, slot, uncles)) - }, - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, - telemetry.as_ref().map(|x| x.handle()), - )?; - - let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - grandpa_link.shared_authority_set().clone(), - Vec::default(), - )); - - let (network, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - warp_sync: Some(warp_sync), - })?; - - let enable_grandpa = !config.disable_grandpa; - if enable_grandpa { - let name = config.network.node_name.clone(); - - let config = grandpa::Config { - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore: None, - local_role: config.role.clone(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - task_manager.spawn_handle().spawn_blocking( - "grandpa-observer", - 
grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, - ); - } - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let light_deps = node_rpc::LightDeps { - remote_blockchain: backend.remote_blockchain(), - fetcher: on_demand.clone(), - client: client.clone(), - pool: transaction_pool.clone(), - }; - - let rpc_extensions = node_rpc::create_light(light_deps); - - let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - on_demand: Some(on_demand), - remote_blockchain: Some(backend.remote_blockchain()), - rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), - client: client.clone(), - transaction_pool: transaction_pool.clone(), - keystore: keystore_container.sync_keystore(), - config, - backend, - system_rpc_tx, - network: network.clone(), - task_manager: &mut task_manager, - telemetry: telemetry.as_mut(), - })?; - - network_starter.start_network(); - Ok((task_manager, rpc_handlers, client, network, transaction_pool)) -} - -/// Builds a new service for a light client. -pub fn new_light(config: Configuration) -> Result { - new_light_base(config).map(|(task_manager, _, _, _, _)| task_manager) -} - #[cfg(test)] mod tests { - use crate::service::{new_full_base, new_light_base, NewFullBase}; + use crate::service::{new_full_base, NewFullBase}; use codec::Encode; use node_primitives::{Block, DigestItem, Signature}; use node_runtime::{ @@ -771,15 +593,6 @@ mod tests { ); Ok((node, setup_handles.unwrap())) }, - |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new( - keep_alive, - client, - network, - transaction_pool, - )) - }, |service, &mut (ref mut block_import, ref babe_link)| { let parent_id = BlockId::number(service.client().chain_info().best_number); let parent_header = service.client().header(&parent_id).unwrap().unwrap(); @@ -946,15 +759,6 @@ mod tests { transaction_pool, )) }, - |config| { - let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new( - keep_alive, - client, - network, - transaction_pool, - )) - }, vec!["//Alice".into(), "//Bob".into()], ) } diff --git a/bin/node/cli/tests/database_role_subdir_migration.rs b/bin/node/cli/tests/database_role_subdir_migration.rs deleted file mode 100644 index 9338d8a8e4f43..0000000000000 --- a/bin/node/cli/tests/database_role_subdir_migration.rs +++ /dev/null @@ -1,116 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use sc_client_db::{ - light::LightStorage, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode, - TransactionStorageMode, -}; -use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; -use tempfile::tempdir; - -pub mod common; - -#[tokio::test] -#[cfg(unix)] -async fn database_role_subdir_migration() { - type Block = RawBlock>; - - let base_path = tempdir().expect("could not create a temp dir"); - let path = base_path.path().join("chains/dev/db"); - // create a dummy database dir - { - let _old_db = LightStorage::::new(DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, - source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 }, - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - }) - .unwrap(); - } - - assert!(path.join("db_version").exists()); - assert!(!path.join("light").exists()); - - // start a light client - common::run_node_for_a_while( - base_path.path(), - &[ - "--dev", - "--light", - "--port", - "30335", - "--rpc-port", - "44444", - "--ws-port", - "44445", - "--no-prometheus", - ], - ) - .await; - - // check if the database dir had been migrated - assert!(!path.join("db_version").exists()); - assert!(path.join("light/db_version").exists()); -} - -#[test] -#[cfg(unix)] -fn database_role_subdir_migration_fail_on_different_role() { - type Block = RawBlock>; - - let base_path = tempdir().expect("could not create a temp dir"); - let path = base_path.path().join("chains/dev/db"); - - // create a database with the old layout - { - let _old_db = LightStorage::::new(DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, - source: DatabaseSource::RocksDb { path: path.to_path_buf(), cache_size: 128 }, - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - }) - .unwrap(); - } - - assert!(path.join("db_version").exists()); - assert!(!path.join("light/db_version").exists()); - - // start a client with a different role (full), it should fail and not change any files on disk - common::run_node_assert_fail( - &base_path.path(), - &[ - "--dev", - "--port", - "30334", - "--rpc-port", - "44446", - "--ws-port", - "44447", - "--no-prometheus", - ], - ); - - // check if the files are unchanged - assert!(path.join("db_version").exists()); - assert!(!path.join("light/db_version").exists()); - assert!(!path.join("full/db_version").exists()); -} diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs index e88c724193697..24d9ef4fd4b95 100644 --- a/client/light/src/blockchain.rs +++ b/client/light/src/blockchain.rs @@ -27,7 +27,7 @@ use sp_runtime::{ Justifications, }; -use crate::fetcher::RemoteHeaderRequest; +use sc_client_api::light::RemoteHeaderRequest; pub use sc_client_api::{ backend::{AuxStore, NewBlockState, ProvideChtRoots}, blockchain::{ diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs deleted file mode 100644 index 5740e407a5e89..0000000000000 --- a/client/light/src/fetcher.rs +++ /dev/null @@ -1,366 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client data fetcher. Fetches requested data from remote full nodes. - -use std::{ - collections::{BTreeMap, HashMap}, - marker::PhantomData, - sync::Arc, -}; - -use codec::{Decode, Encode}; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_core::{ - convert_hash, - storage::{ChildInfo, ChildType}, - traits::{CodeExecutor, SpawnNamed}, -}; -use sp_runtime::traits::{ - AtLeast32Bit, Block as BlockT, CheckedConversion, Hash, HashFor, Header as HeaderT, NumberFor, -}; -pub use sp_state_machine::StorageProof; -use sp_state_machine::{ - key_changes_proof_check_with_db, read_child_proof_check, read_proof_check, - ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, - InMemoryChangesTrieStorage, TrieBackend, -}; - -use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; -pub use sc_client_api::{ - cht, - light::{ - ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, - RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, - Storage as BlockchainStorage, - }, -}; - -/// Remote data checker. -pub struct LightDataChecker> { - blockchain: Arc>, - executor: E, - spawn_handle: Box, - _marker: PhantomData, -} - -impl> LightDataChecker { - /// Create new light data checker. - pub fn new( - blockchain: Arc>, - executor: E, - spawn_handle: Box, - ) -> Self { - Self { blockchain, executor, spawn_handle, _marker: PhantomData } - } - - /// Check remote changes query proof assuming that CHT-s are of given size. - pub fn check_changes_proof_with_cht_size( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof, - cht_size: NumberFor, - ) -> ClientResult, u32)>> { - // since we need roots of all changes tries for the range begin..max - // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || - remote_proof.max_block < request.last_block.0 - { - return Err(ClientError::ChangesTrieAccessFailed(format!( - "Invalid max_block used by the remote node: {}. 
Local: {}..{}..{}", - remote_proof.max_block, - request.first_block.0, - request.last_block.0, - request.max_block.0, - )) - .into()) - } - - // check if remote node has responded with extra changes trie roots proofs - // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof - .roots - .keys() - .next() - .map(|first_root| { - *first_root < request.first_block.0 || *first_root >= request.tries_roots.0 - }) - .unwrap_or(false); - let is_extra_last_root = remote_proof - .roots - .keys() - .next_back() - .map(|last_root| *last_root >= request.tries_roots.0) - .unwrap_or(false); - if is_extra_first_root || is_extra_last_root { - return Err(ClientError::ChangesTrieAccessFailed(format!( - "Extra changes tries roots proofs provided by the remote node: [{:?}..{:?}]. Expected in range: [{}; {})", - remote_proof.roots.keys().next(), remote_proof.roots.keys().next_back(), - request.first_block.0, request.tries_roots.0, - )).into()); - } - - // if request has been composed when some required headers were already pruned - // => remote node has sent us CHT-based proof of required changes tries roots - // => check that this proof is correct before proceeding with changes proof - let remote_max_block = remote_proof.max_block; - let remote_roots = remote_proof.roots; - let remote_roots_proof = remote_proof.roots_proof; - let remote_proof = remote_proof.proof; - if !remote_roots.is_empty() { - self.check_changes_tries_proof(cht_size, &remote_roots, remote_roots_proof)?; - } - - // and now check the key changes proof + get the changes - let mut result = Vec::new(); - let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); - for config_range in &request.changes_trie_configs { - let result_range = key_changes_proof_check_with_db::, _>( - ChangesTrieConfigurationRange { - config: config_range - .config - .as_ref() - .ok_or(ClientError::ChangesTriesNotSupported)?, - zero: config_range.zero.0, - end: config_range.end.map(|(n, _)| n), - }, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: &remote_roots, - }, - &proof_storage, - request.first_block.0, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0, - }, - remote_max_block, - request.storage_key.as_ref(), - &request.key, - ) - .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } - - /// Check CHT-based proof for changes tries roots. - pub fn check_changes_tries_proof( - &self, - cht_size: NumberFor, - remote_roots: &BTreeMap, B::Hash>, - remote_roots_proof: StorageProof, - ) -> ClientResult<()> { - // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); - - // remote_roots.keys() are sorted => we can use this to group changes tries roots - // that are belongs to the same CHT - let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::( - cht_size, - blocks, - |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> replaced with CHT) - let first_block = cht_blocks - .first() - .cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self - .blockchain - .storage() - .changes_trie_cht_root(cht_size, first_block)? 
- .ok_or(ClientError::InvalidCHTProof)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - if !storage.contains(&local_cht_root, EMPTY_PREFIX) { - return Err(ClientError::InvalidCHTProof.into()) - } - - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, local_cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::>( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend, - )?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, - storage, - ) - } -} - -impl FetchChecker for LightDataChecker -where - Block: BlockT, - E: CodeExecutor + Clone + 'static, - S: BlockchainStorage, -{ - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - remote_header: Option, - remote_proof: StorageProof, - ) -> ClientResult { - let remote_header = - remote_header.ok_or_else(|| ClientError::from(ClientError::InvalidCHTProof))?; - let remote_header_hash = remote_header.hash(); - cht::check_proof::>( - request.cht_root, - request.block, - remote_header_hash, - remote_proof, - ) - .map(|_| remote_header) - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - read_proof_check::, _>( - convert_hash(request.header.state_root()), - remote_proof, - request.keys.iter(), - ) - .map_err(|e| ClientError::from(e)) - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err(ClientError::InvalidChildType), - }; - read_child_proof_check::, _>( - convert_hash(request.header.state_root()), - remote_proof, - &child_info, - request.keys.iter(), - ) - .map_err(|e| ClientError::from(e)) - } - - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: StorageProof, - ) -> ClientResult> { - check_execution_proof::<_, _, HashFor>( - &self.executor, - self.spawn_handle.clone(), - request, - remote_proof, - ) - } - - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof, - ) -> ClientResult, u32)>> { - self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) - } - - fn check_body_proof( - &self, - request: &RemoteBodyRequest, - body: Vec, - ) -> ClientResult> { - // TODO: #2621 - let extrinsics_root = - HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); - if *request.header.extrinsics_root() == extrinsics_root { - Ok(body) - } else { - Err(ClientError::ExtrinsicRootInvalid { - received: request.header.extrinsics_root().to_string(), - expected: extrinsics_root.to_string(), - }) - } - } -} - -/// A view of BTreeMap as a changes trie roots storage. 
-struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { - roots: (Number, &'a [Hash]), - prev_roots: &'a BTreeMap, -} - -impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> -where - H: Hasher, - Number: std::fmt::Display - + std::hash::Hash - + Clone - + AtLeast32Bit - + Encode - + Decode - + Send - + Sync - + 'static, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, -{ - fn build_anchor( - &self, - _hash: H::Out, - ) -> Result, String> { - Err("build_anchor is only called when building block".into()) - } - - fn root( - &self, - _anchor: &ChangesTrieAnchorBlockId, - block: Number, - ) -> Result, String> { - // we can't ask for roots from parallel forks here => ignore anchor - let root = if block < self.roots.0 { - self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() - } else { - let index: Option = - block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); - index.and_then(|index| self.roots.1.get(index as usize).cloned()) - }; - - Ok(root.map(|root| { - let mut hasher_root: H::Out = Default::default(); - hasher_root.as_mut().copy_from_slice(root.as_ref()); - hasher_root - })) - } -} diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index 0c874326ef2e0..4b084cda0f8b1 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -18,36 +18,18 @@ //! Light client components. -use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_runtime::traits::{Block as BlockT, HashFor}; use std::sync::Arc; pub mod backend; pub mod blockchain; pub mod call_executor; -pub mod fetcher; pub use backend::*; pub use blockchain::*; pub use call_executor::*; -pub use fetcher::*; - -/// Create an instance of fetch data checker. -pub fn new_fetch_checker>( - blockchain: Arc>, - executor: E, - spawn_handle: Box, -) -> LightDataChecker -where - E: CodeExecutor, -{ - LightDataChecker::new(blockchain, executor, spawn_handle) -} -/// Create an instance of light client blockchain backend. -pub fn new_light_blockchain>(storage: S) -> Arc> { - Arc::new(Blockchain::new(storage)) -} +use sc_client_api::light::Storage as BlockchainStorage; /// Create an instance of light client backend. pub fn new_light_backend(blockchain: Arc>) -> Arc>> diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 80eccc2c97deb..bacf39124abc1 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -19,7 +19,6 @@ //! Substrate state API. mod state_full; -mod state_light; #[cfg(test)] mod tests; @@ -29,7 +28,6 @@ use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, Subscripti use rpc::Result as RpcResult; use std::sync::Arc; -use sc_client_api::light::{Fetcher, RemoteBlockchain}; use sc_rpc_api::{state::ReadProof, DenyUnsafe}; use sp_core::{ storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, @@ -217,45 +215,6 @@ where (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } -/// Create new state API that works on light node. 
-pub fn new_light>( - client: Arc, - subscriptions: SubscriptionManager, - remote_blockchain: Arc>, - fetcher: Arc, - deny_unsafe: DenyUnsafe, -) -> (State, ChildState) -where - Block: BlockT + 'static, - Block::Hash: Unpin, - BE: Backend + 'static, - Client: ExecutorProvider - + StorageProvider - + HeaderMetadata - + ProvideRuntimeApi - + HeaderBackend - + BlockchainEvents - + Send - + Sync - + 'static, - F: Send + Sync + 'static, -{ - let child_backend = Box::new(self::state_light::LightState::new( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - fetcher.clone(), - )); - - let backend = Box::new(self::state_light::LightState::new( - client, - subscriptions, - remote_blockchain, - fetcher, - )); - (State { backend, deny_unsafe }, ChildState { backend: child_backend }) -} - /// State API with subscriptions support. pub struct State { backend: Box>, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs deleted file mode 100644 index 749e57c365cc0..0000000000000 --- a/client/rpc/src/state/state_light.rs +++ /dev/null @@ -1,873 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! State API backend for light nodes. - -use codec::Decode; -use futures::{ - channel::oneshot::{channel, Sender}, - future::{ready, Either}, - Future, FutureExt, SinkExt, Stream, StreamExt as _, TryFutureExt, TryStreamExt as _, -}; -use hash_db::Hasher; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; -use log::warn; -use parking_lot::Mutex; -use rpc::Result as RpcResult; -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::Arc, -}; - -use sc_client_api::{ - light::{ - future_header, Fetcher, RemoteBlockchain, RemoteCallRequest, RemoteReadChildRequest, - RemoteReadRequest, - }, - BlockchainEvents, -}; -use sc_rpc_api::state::ReadProof; -use sp_blockchain::{Error as ClientError, HeaderBackend}; -use sp_core::{ - storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, - Bytes, OpaqueMetadata, -}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor}, -}; -use sp_version::RuntimeVersion; - -use super::{ - client_err, - error::{Error, FutureResult}, - ChildStateBackend, StateBackend, -}; - -/// Storage data map of storage keys => (optional) storage value. -type StorageMap = HashMap>; - -/// State API backend for light nodes. -#[derive(Clone)] -pub struct LightState, Client> { - client: Arc, - subscriptions: SubscriptionManager, - version_subscriptions: SimpleSubscriptions, - storage_subscriptions: Arc>>, - remote_blockchain: Arc>, - fetcher: Arc, -} - -/// Shared requests container. -trait SharedRequests: Clone + Send + Sync { - /// Tries to listen for already issued request, or issues request. 
- /// - /// Returns true if requests has been issued. - fn listen_request(&self, block: Hash, sender: Sender>) -> bool; - - /// Returns (and forgets) all listeners for given request. - fn on_response_received(&self, block: Hash) -> Vec>>; -} - -/// Storage subscriptions data. -struct StorageSubscriptions { - /// Active storage requests. - active_requests: HashMap>>>, - /// Map of subscription => keys that this subscription watch for. - keys_by_subscription: HashMap>, - /// Map of key => set of subscriptions that watch this key. - subscriptions_by_key: HashMap>, -} - -impl SharedRequests - for Arc>> -{ - fn listen_request(&self, block: Block::Hash, sender: Sender>) -> bool { - let mut subscriptions = self.lock(); - let active_requests_at = subscriptions.active_requests.entry(block).or_default(); - active_requests_at.push(sender); - active_requests_at.len() == 1 - } - - fn on_response_received(&self, block: Block::Hash) -> Vec>> { - self.lock().active_requests.remove(&block).unwrap_or_default() - } -} - -/// Simple, maybe shared, subscription data that shares per block requests. -type SimpleSubscriptions = Arc>>>>>; - -impl SharedRequests for SimpleSubscriptions -where - Hash: Send + Eq + std::hash::Hash, - V: Send, -{ - fn listen_request(&self, block: Hash, sender: Sender>) -> bool { - let mut subscriptions = self.lock(); - let active_requests_at = subscriptions.entry(block).or_default(); - active_requests_at.push(sender); - active_requests_at.len() == 1 - } - - fn on_response_received(&self, block: Hash) -> Vec>> { - self.lock().remove(&block).unwrap_or_default() - } -} - -impl + 'static, Client> LightState -where - Block: BlockT, - Client: HeaderBackend + Send + Sync + 'static, -{ - /// Create new state API backend for light nodes. - pub fn new( - client: Arc, - subscriptions: SubscriptionManager, - remote_blockchain: Arc>, - fetcher: Arc, - ) -> Self { - Self { - client, - subscriptions, - version_subscriptions: Arc::new(Mutex::new(HashMap::new())), - storage_subscriptions: Arc::new(Mutex::new(StorageSubscriptions { - active_requests: HashMap::new(), - keys_by_subscription: HashMap::new(), - subscriptions_by_key: HashMap::new(), - })), - remote_blockchain, - fetcher, - } - } - - /// Returns given block hash or best block hash if None is passed. 
- fn block_or_best(&self, hash: Option) -> Block::Hash { - hash.unwrap_or_else(|| self.client.info().best_hash) - } -} - -impl StateBackend for LightState -where - Block: BlockT, - Block::Hash: Unpin, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static, -{ - fn call( - &self, - block: Option, - method: String, - call_data: Bytes, - ) -> FutureResult { - call( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - method, - call_data, - ) - .boxed() - } - - fn storage_keys( - &self, - _block: Option, - _prefix: StorageKey, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage_pairs( - &self, - _block: Option, - _prefix: StorageKey, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage_keys_paged( - &self, - _block: Option, - _prefix: Option, - _count: u32, - _start_key: Option, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage_size(&self, _: Option, _: StorageKey) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - storage( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - vec![key.0.clone()], - ) - .map_ok(move |mut values| { - values - .remove(&key) - .expect("successful request has entries for all requested keys; qed") - }) - .boxed() - } - - fn storage_hash( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - let res = StateBackend::storage(self, block, key); - async move { res.await.map(|r| r.map(|s| HashFor::::hash(&s.0))) }.boxed() - } - - fn metadata(&self, block: Option) -> FutureResult { - self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) - .and_then(|metadata| async move { - OpaqueMetadata::decode(&mut &metadata.0[..]) - .map(Into::into) - .map_err(|decode_err| { - client_err(ClientError::CallResultDecode( - "Unable to decode metadata", - decode_err, - )) - }) - }) - .boxed() - } - - fn runtime_version(&self, block: Option) -> FutureResult { - runtime_version(&*self.remote_blockchain, self.fetcher.clone(), self.block_or_best(block)) - .boxed() - } - - fn query_storage( - &self, - _from: Block::Hash, - _to: Option, - _keys: Vec, - ) -> FutureResult>> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn query_storage_at( - &self, - _keys: Vec, - _at: Option, - ) -> FutureResult>> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn read_proof( - &self, - _block: Option, - _keys: Vec, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn subscribe_storage( - &self, - _meta: crate::Metadata, - subscriber: Subscriber>, - keys: Option>, - ) { - let keys = match keys { - Some(keys) if !keys.is_empty() => keys, - _ => { - warn!("Cannot subscribe to all keys on light client. 
Subscription rejected."); - return - }, - }; - - let keys = keys.iter().cloned().collect::>(); - let keys_to_check = keys.iter().map(|k| k.0.clone()).collect::>(); - let subscription_id = self.subscriptions.add(subscriber, move |sink| { - let fetcher = self.fetcher.clone(); - let remote_blockchain = self.remote_blockchain.clone(); - let storage_subscriptions = self.storage_subscriptions.clone(); - let initial_block = self.block_or_best(None); - let initial_keys = keys_to_check.iter().cloned().collect::>(); - - let changes_stream = subscription_stream::( - storage_subscriptions.clone(), - self.client.import_notification_stream().map(|notification| notification.hash), - display_error( - storage(&*remote_blockchain, fetcher.clone(), initial_block, initial_keys) - .map(move |r| r.map(|r| (initial_block, r))), - ), - move |block| { - // there'll be single request per block for all active subscriptions - // with all subscribed keys - let keys = storage_subscriptions - .lock() - .subscriptions_by_key - .keys() - .map(|k| k.0.clone()) - .collect(); - - storage(&*remote_blockchain, fetcher.clone(), block, keys) - }, - move |block, old_value, new_value| { - // let's only select keys which are valid for this subscription - let new_value = new_value - .iter() - .filter(|(k, _)| keys_to_check.contains(&k.0)) - .map(|(k, v)| (k.clone(), v.clone())) - .collect::>(); - let value_differs = old_value - .as_ref() - .map(|old_value| **old_value != new_value) - .unwrap_or(true); - - value_differs.then(|| StorageChangeSet { - block, - changes: new_value.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), - }) - }, - ); - - changes_stream - .map_ok(Ok) - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - - // remember keys associated with this subscription - let mut storage_subscriptions = self.storage_subscriptions.lock(); - storage_subscriptions - .keys_by_subscription - .insert(subscription_id.clone(), keys.clone()); - for key in keys { - storage_subscriptions - .subscriptions_by_key - .entry(key) - .or_default() - .insert(subscription_id.clone()); - } - } - - fn unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - if !self.subscriptions.cancel(id.clone()) { - return Ok(false) - } - - // forget subscription keys - let mut storage_subscriptions = self.storage_subscriptions.lock(); - let keys = storage_subscriptions.keys_by_subscription.remove(&id); - for key in keys.into_iter().flat_map(|keys| keys.into_iter()) { - match storage_subscriptions.subscriptions_by_key.entry(key) { - Entry::Vacant(_) => unreachable!( - "every key from keys_by_subscription has\ - corresponding entry in subscriptions_by_key; qed" - ), - Entry::Occupied(mut entry) => { - entry.get_mut().remove(&id); - if entry.get().is_empty() { - entry.remove(); - } - }, - } - } - - Ok(true) - } - - fn subscribe_runtime_version( - &self, - _meta: crate::Metadata, - subscriber: Subscriber, - ) { - self.subscriptions.add(subscriber, move |sink| { - let fetcher = self.fetcher.clone(); - let remote_blockchain = self.remote_blockchain.clone(); - let version_subscriptions = self.version_subscriptions.clone(); - let initial_block = self.block_or_best(None); - - let versions_stream = subscription_stream::( - version_subscriptions, - self.client.import_notification_stream().map(|notification| notification.hash), - display_error( - runtime_version(&*remote_blockchain, fetcher.clone(), 
initial_block) - .map(move |r| r.map(|r| (initial_block, r))), - ), - move |block| runtime_version(&*remote_blockchain, fetcher.clone(), block), - |_, old_version, new_version| { - let version_differs = old_version - .as_ref() - .map(|old_version| *old_version != new_version) - .unwrap_or(true); - - version_differs.then(|| new_version.clone()) - }, - ); - - versions_stream - .map_ok(Ok) - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } - - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } - - fn trace_block( - &self, - _block: Block::Hash, - _targets: Option, - _storage_keys: Option, - _methods: Option, - ) -> FutureResult { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } -} - -impl ChildStateBackend for LightState -where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static, -{ - fn read_child_proof( - &self, - _block: Option, - _storage_key: PrefixedStorageKey, - _keys: Vec, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage_keys( - &self, - _block: Option, - _storage_key: PrefixedStorageKey, - _prefix: StorageKey, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage_keys_paged( - &self, - _block: Option, - _storage_key: PrefixedStorageKey, - _prefix: Option, - _count: u32, - _start_key: Option, - ) -> FutureResult> { - async move { Err(client_err(ClientError::NotAvailableOnLightClient)) }.boxed() - } - - fn storage( - &self, - block: Option, - storage_key: PrefixedStorageKey, - key: StorageKey, - ) -> FutureResult> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let child_storage = - resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { - match result { - Ok(header) => Either::Left( - fetcher - .remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }) - .then(move |result| { - ready( - result - .map(|mut data| { - data.remove(&key.0) - .expect( - "successful result has entry for all keys; qed", - ) - .map(StorageData) - }) - .map_err(client_err), - ) - }), - ), - Err(error) => Either::Right(ready(Err(error))), - } - }); - - child_storage.boxed() - } - - fn storage_entries( - &self, - block: Option, - storage_key: PrefixedStorageKey, - keys: Vec, - ) -> FutureResult>> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let keys = keys.iter().map(|k| k.0.clone()).collect::>(); - let child_storage = - resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { - match result { - Ok(header) => Either::Left( - fetcher - .remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key, - keys: keys.clone(), - retry_count: Default::default(), - }) - .then(move |result| { - ready( - result - .map(|data| { - data.iter() - .filter_map(|(k, d)| { - keys.contains(k).then(|| { - d.as_ref().map(|v| StorageData(v.to_vec())) - }) - }) - .collect::>() - }) - .map_err(client_err), - ) - }), - ), - Err(error) => Either::Right(ready(Err(error))), - } - }); - - child_storage.boxed() - } - - fn storage_hash( - &self, - block: Option, 
- storage_key: PrefixedStorageKey, - key: StorageKey, - ) -> FutureResult> { - let child_storage = ChildStateBackend::storage(self, block, storage_key, key); - - async move { child_storage.await.map(|r| r.map(|s| HashFor::::hash(&s.0))) }.boxed() - } -} - -/// Resolve header by hash. -fn resolve_header>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: &F, - block: Block::Hash, -) -> impl std::future::Future> { - let maybe_header = future_header(remote_blockchain, fetcher, BlockId::Hash(block)); - - maybe_header.then(move |result| { - ready( - result - .and_then(|maybe_header| { - maybe_header.ok_or_else(|| ClientError::UnknownBlock(format!("{}", block))) - }) - .map_err(client_err), - ) - }) -} - -/// Call runtime method at given block -fn call>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, - method: String, - call_data: Bytes, -) -> impl std::future::Future> { - resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { - Ok(header) => Either::Left( - fetcher - .remote_call(RemoteCallRequest { - block, - header, - method, - call_data: call_data.0, - retry_count: Default::default(), - }) - .then(|result| ready(result.map(Bytes).map_err(client_err))), - ), - Err(error) => Either::Right(ready(Err(error))), - }) -} - -/// Get runtime version at given block. -fn runtime_version>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, -) -> impl std::future::Future> { - call(remote_blockchain, fetcher, block, "Core_version".into(), Bytes(Vec::new())).then( - |version| { - ready(version.and_then(|version| { - Decode::decode(&mut &version.0[..]) - .map_err(|e| client_err(ClientError::VersionInvalid(e.to_string()))) - })) - }, - ) -} - -/// Get storage value at given key at given block. -fn storage>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, - keys: Vec>, -) -> impl std::future::Future>, Error>> { - resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { - Ok(header) => Either::Left( - fetcher - .remote_read(RemoteReadRequest { - block, - header, - keys, - retry_count: Default::default(), - }) - .then(|result| { - ready( - result - .map(|result| { - result - .into_iter() - .map(|(key, value)| (StorageKey(key), value.map(StorageData))) - .collect() - }) - .map_err(client_err), - ) - }), - ), - Err(error) => Either::Right(ready(Err(error))), - }) -} - -/// Returns subscription stream that issues request on every imported block and -/// if value has changed from previous block, emits (stream) item. 
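`subscription_stream` below chains an initial lookup with one lookup per imported block and only forwards a value when it differs from the previously seen one. The change-detection core, reduced to a stand-alone sketch (a simplified model, not the removed API):

use futures::{future::ready, Stream, StreamExt};

/// Drops consecutive duplicates so subscribers only see actual changes.
fn changes_only<V: PartialEq + Clone>(
	values: impl Stream<Item = V>,
) -> impl Stream<Item = V> {
	let mut previous: Option<V> = None;
	values.filter_map(move |value| {
		let changed = previous.as_ref() != Some(&value);
		previous = Some(value.clone());
		ready(changed.then(|| value))
	})
}

Feeding it `futures::stream::iter([1, 1, 2, 2, 3])` yields 1, 2, 3, which matches the behaviour the tests at the end of this file pin down for `subscription_stream`.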
-fn subscription_stream< - Block, - Requests, - FutureBlocksStream, - V, - N, - InitialRequestFuture, - IssueRequest, - IssueRequestFuture, - CompareValues, ->( - shared_requests: Requests, - future_blocks_stream: FutureBlocksStream, - initial_request: InitialRequestFuture, - issue_request: IssueRequest, - compare_values: CompareValues, -) -> impl Stream> -where - Block: BlockT, - Requests: 'static + SharedRequests, - FutureBlocksStream: Stream, - V: Send + 'static + Clone, - InitialRequestFuture: Future> + Send + 'static, - IssueRequest: 'static + Fn(Block::Hash) -> IssueRequestFuture, - IssueRequestFuture: Future> + Send + 'static, - CompareValues: Fn(Block::Hash, Option<&V>, &V) -> Option, -{ - // we need to send initial value first, then we'll only be sending if value has changed - let previous_value = Arc::new(Mutex::new(None)); - - // prepare 'stream' of initial values - let initial_value_stream = initial_request.into_stream(); - - // prepare stream of future values - // - // we do not want to stop stream if single request fails - // (the warning should have been already issued by the request issuer) - let future_values_stream = future_blocks_stream - .then(move |block| { - maybe_share_remote_request::( - shared_requests.clone(), - block, - &issue_request, - ) - .map(move |r| r.map(|v| (block, v))) - }) - .filter(|r| ready(r.is_ok())); - - // now let's return changed values for selected blocks - initial_value_stream - .chain(future_values_stream) - .try_filter_map(move |(block, new_value)| { - let mut previous_value = previous_value.lock(); - let res = compare_values(block, previous_value.as_ref(), &new_value).map( - |notification_value| { - *previous_value = Some(new_value); - notification_value - }, - ); - async move { Ok(res) } - }) - .map_err(|_| ()) -} - -/// Request some data from remote node, probably reusing response from already -/// (in-progress) existing request. -fn maybe_share_remote_request( - shared_requests: Requests, - block: Block::Hash, - issue_request: &IssueRequest, -) -> impl std::future::Future> -where - V: Clone, - Requests: SharedRequests, - IssueRequest: Fn(Block::Hash) -> IssueRequestFuture, - IssueRequestFuture: std::future::Future>, -{ - let (sender, receiver) = channel(); - let need_issue_request = shared_requests.listen_request(block, sender); - - // if that isn't the first request - just listen for existing request' response - if !need_issue_request { - return Either::Right(receiver.then(|r| ready(r.unwrap_or(Err(()))))) - } - - // that is the first request - issue remote request + notify all listeners on - // completion - Either::Left(display_error(issue_request(block)).then(move |remote_result| { - let listeners = shared_requests.on_response_received(block); - // skip first element, because this future is the first element - for receiver in listeners.into_iter().skip(1) { - if let Err(_) = receiver.send(remote_result.clone()) { - // we don't care if receiver has been dropped already - } - } - - ready(remote_result) - })) -} - -/// Convert successful future result into Ok(result) and error into Err(()), -/// displaying warning. 
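`display_error` (defined next) logs a failed remote lookup and erases the error to `()`, so a single failure is dropped by the `filter` in `subscription_stream` instead of terminating the whole subscription. A trimmed equivalent, assuming only the `log` crate; `logged` is an illustrative name:

use log::warn;

/// Logs the error and erases it, so one failed lookup can be filtered out
/// downstream instead of ending the stream.
async fn logged<T, E: std::fmt::Debug>(
	fut: impl std::future::Future<Output = Result<T, E>>,
) -> Result<T, ()> {
	fut.await.map_err(|err| warn!("Remote request failed: {:?}", err))
}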
-fn display_error(future: F) -> impl std::future::Future> -where - F: std::future::Future>, -{ - future.then(|result| { - ready(result.or_else(|err| { - warn!("Remote request for subscription data has failed with: {:?}", err); - Err(()) - })) - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use futures::{executor, stream}; - use sp_core::H256; - use substrate_test_runtime_client::runtime::Block; - - #[test] - fn subscription_stream_works() { - let stream = subscription_stream::( - SimpleSubscriptions::default(), - stream::iter(vec![H256::from([2; 32]), H256::from([3; 32])]), - ready(Ok((H256::from([1; 32]), 100))), - |block| match block[0] { - 2 => ready(Ok(100)), - 3 => ready(Ok(200)), - _ => unreachable!("should not issue additional requests"), - }, - |_, old_value, new_value| match old_value == Some(new_value) { - true => None, - false => Some(new_value.clone()), - }, - ); - - assert_eq!(executor::block_on(stream.collect::>()), vec![Ok(100), Ok(200)]); - } - - #[test] - fn subscription_stream_ignores_failed_requests() { - let stream = subscription_stream::( - SimpleSubscriptions::default(), - stream::iter(vec![H256::from([2; 32]), H256::from([3; 32])]), - ready(Ok((H256::from([1; 32]), 100))), - |block| match block[0] { - 2 => ready(Err(client_err(ClientError::NotAvailableOnLightClient))), - 3 => ready(Ok(200)), - _ => unreachable!("should not issue additional requests"), - }, - |_, old_value, new_value| match old_value == Some(new_value) { - true => None, - false => Some(new_value.clone()), - }, - ); - - assert_eq!(executor::block_on(stream.collect::>()), vec![Ok(100), Ok(200)]); - } - - #[test] - fn maybe_share_remote_request_shares_request() { - type UnreachableFuture = futures::future::Ready>; - - let shared_requests = SimpleSubscriptions::default(); - - // let's 'issue' requests for B1 - shared_requests.lock().insert(H256::from([1; 32]), vec![channel().0]); - - // make sure that no additional requests are issued when we're asking for B1 - let _ = maybe_share_remote_request::( - shared_requests.clone(), - H256::from([1; 32]), - &|_| unreachable!("no duplicate requests issued"), - ); - - // make sure that additional requests is issued when we're asking for B2 - let request_issued = Arc::new(Mutex::new(false)); - let _ = maybe_share_remote_request::( - shared_requests.clone(), - H256::from([2; 32]), - &|_| { - *request_issued.lock() = true; - ready(Ok(Default::default())) - }, - ); - assert!(*request_issued.lock()); - } -} diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 589d7848a5b28..c3ae1452042f4 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -53,7 +53,6 @@ sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } -sc-light = { version = "4.0.0-dev", path = "../light" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index e01a85878817c..bcb05ce743701 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -18,7 +18,7 @@ use crate::{ build_network_future, - client::{light, Client, ClientConfig}, + client::{Client, ClientConfig}, config::{Configuration, 
KeystoreConfig, PrometheusConfig, TransactionStorageMode}, error::Error, metrics::MetricsService, @@ -58,7 +58,7 @@ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, BlockIdTo, HashFor, Zero}, + traits::{Block as BlockT, BlockIdTo, Zero}, BuildStorage, }; use std::{str::FromStr, sync::Arc, time::SystemTime}; @@ -137,47 +137,9 @@ pub type TFullBackend = sc_client_db::Backend; pub type TFullCallExecutor = crate::client::LocalCallExecutor, TExec>; -/// Light client type. -pub type TLightClient = - TLightClientWithBackend>; - -/// Light client backend type. -pub type TLightBackend = - sc_light::Backend, HashFor>; - -/// Light call executor type. -pub type TLightCallExecutor = sc_light::GenesisCallExecutor< - sc_light::Backend, HashFor>, - crate::client::LocalCallExecutor< - TBl, - sc_light::Backend, HashFor>, - TExec, - >, ->; - type TFullParts = (TFullClient, Arc>, KeystoreContainer, TaskManager); -type TLightParts = ( - Arc>, - Arc>, - KeystoreContainer, - TaskManager, - Arc>, -); - -/// Light client backend type with a specific hash type. -pub type TLightBackendWithHash = - sc_light::Backend, THash>; - -/// Light client type with a specific backend. -pub type TLightClientWithBackend = Client< - TBackend, - sc_light::GenesisCallExecutor>, - TBl, - TRtApi, ->; - trait AsCryptoStoreRef { fn keystore_ref(&self) -> Arc; fn sync_keystore_ref(&self) -> Arc; @@ -359,53 +321,6 @@ where Ok((client, backend, keystore_container, task_manager)) } -/// Create the initial parts of a light node. -pub fn new_light_parts( - config: &Configuration, - telemetry: Option, - executor: TExec, -) -> Result, Error> -where - TBl: BlockT, - TExec: CodeExecutor + RuntimeVersionOf + Clone, -{ - let keystore_container = KeystoreContainer::new(&config.keystore)?; - let task_manager = { - let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.tokio_handle.clone(), registry)? - }; - - let db_storage = { - let db_settings = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), - state_pruning: config.state_pruning.clone(), - source: config.database.clone(), - keep_blocks: config.keep_blocks.clone(), - transaction_storage: config.transaction_storage.clone(), - }; - sc_client_db::light::LightStorage::new(db_settings)? - }; - let light_blockchain = sc_light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new(sc_light::new_fetch_checker::<_, TBl, _>( - light_blockchain.clone(), - executor.clone(), - Box::new(task_manager.spawn_handle()), - )); - let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); - let backend = sc_light::new_light_backend(light_blockchain); - let client = Arc::new(light::new_light( - backend.clone(), - config.chain_spec.as_storage_builder(), - executor, - Box::new(task_manager.spawn_handle()), - config.prometheus_config.as_ref().map(|config| config.registry.clone()), - telemetry, - )?); - - Ok((client, backend, keystore_container, task_manager, on_demand)) -} - /// Create an instance of default DB-backend backend. 
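With the light-client path gone, `new_db_backend` below is the single remaining backend constructor in the builder. A hedged call-site sketch, mirroring the `DatabaseSettings` fields the deleted `new_light_parts` populated; `build_backend` is a hypothetical helper, not part of sc-service, and the exact `new_db_backend` signature is assumed:

use sc_client_db::DatabaseSettings;
use sc_service::{new_db_backend, Configuration, Error};
use sp_runtime::traits::Block as BlockT;
use std::sync::Arc;

// Sketch: build the settings a full node needs from its `Configuration`
// and hand them to `new_db_backend`.
fn build_backend<TBl: BlockT>(
	config: &Configuration,
) -> Result<Arc<sc_client_db::Backend<TBl>>, Error> {
	let settings = DatabaseSettings {
		state_cache_size: config.state_cache_size,
		state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)),
		state_pruning: config.state_pruning.clone(),
		source: config.database.clone(),
		keep_blocks: config.keep_blocks.clone(),
		transaction_storage: config.transaction_storage.clone(),
	};
	new_db_backend(settings)
}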
pub fn new_db_backend( settings: DatabaseSettings, @@ -559,12 +474,12 @@ where mut config, task_manager, client, - on_demand, + on_demand: _, backend, keystore, transaction_pool, rpc_extensions_builder, - remote_blockchain, + remote_blockchain: _, network, system_rpc_tx, telemetry, @@ -630,8 +545,6 @@ where client.clone(), transaction_pool.clone(), keystore.clone(), - on_demand.clone(), - remote_blockchain.clone(), &*rpc_extensions_builder, backend.offchain_storage(), system_rpc_tx.clone(), @@ -729,8 +642,6 @@ fn gen_handler( client: Arc, transaction_pool: Arc, keystore: SyncCryptoStorePtr, - on_demand: Option>>, - remote_blockchain: Option>>, rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), offchain_storage: Option<>::OffchainStorage>, system_rpc_tx: TracingUnboundedSender>, @@ -769,34 +680,17 @@ where let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); - let (chain, state, child_state) = - if let (Some(remote_blockchain), Some(on_demand)) = (remote_blockchain, on_demand) { - // Light clients - let chain = sc_rpc::chain::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand.clone(), - ); - let (state, child_state) = sc_rpc::state::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand, - deny_unsafe, - ); - (chain, state, child_state) - } else { - // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let (state, child_state) = sc_rpc::state::new_full( - client.clone(), - subscriptions.clone(), - deny_unsafe, - config.rpc_max_payload, - ); - (chain, state, child_state) - }; + let (chain, state, child_state) = { + // Full nodes + let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let (state, child_state) = sc_rpc::state::new_full( + client.clone(), + subscriptions.clone(), + deny_unsafe, + config.rpc_max_payload, + ); + (chain, state, child_state) + }; let author = sc_rpc::author::Author::new(client, transaction_pool, subscriptions, keystore, deny_unsafe); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d35c0462b8b05..4e3cb0aaf234b 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -42,6 +42,7 @@ use sc_client_api::{ ProvideUncles, }, execution_extensions::ExecutionExtensions, + light::ChangesProof, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeyIterator, ProofProvider, UsageProvider, }; @@ -49,7 +50,6 @@ use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, }; use sc_executor::RuntimeVersion; -use sc_light::fetcher::ChangesProof; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sp_api::{ ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi, diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs deleted file mode 100644 index 7c13b98843e05..0000000000000 --- a/client/service/src/client/light.rs +++ /dev/null @@ -1,82 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client utilities. - -use std::sync::Arc; - -use prometheus_endpoint::Registry; -use sc_executor::RuntimeVersionOf; -use sc_telemetry::TelemetryHandle; -use sp_blockchain::Result as ClientResult; -use sp_core::traits::{CodeExecutor, SpawnNamed}; -use sp_runtime::{ - traits::{Block as BlockT, HashFor}, - BuildStorage, -}; - -use super::{ - call_executor::LocalCallExecutor, - client::{Client, ClientConfig}, -}; -use sc_client_api::light::Storage as BlockchainStorage; -use sc_light::{Backend, GenesisCallExecutor}; - -/// Create an instance of light client. -pub fn new_light( - backend: Arc>>, - genesis_storage: &dyn BuildStorage, - code_executor: E, - spawn_handle: Box, - prometheus_registry: Option, - telemetry: Option, -) -> ClientResult< - Client< - Backend>, - GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E>, - >, - B, - RA, - >, -> -where - B: BlockT, - S: BlockchainStorage + 'static, - E: CodeExecutor + RuntimeVersionOf + Clone + 'static, -{ - let local_executor = LocalCallExecutor::new( - backend.clone(), - code_executor, - spawn_handle.clone(), - ClientConfig::default(), - )?; - let executor = GenesisCallExecutor::new(backend.clone(), local_executor); - Client::new( - backend, - executor, - genesis_storage, - Default::default(), - Default::default(), - Default::default(), - prometheus_registry, - telemetry, - ClientConfig::default(), - ) -} diff --git a/client/service/src/client/mod.rs b/client/service/src/client/mod.rs index 754309e864ebd..7743f479a1713 100644 --- a/client/service/src/client/mod.rs +++ b/client/service/src/client/mod.rs @@ -49,7 +49,6 @@ mod block_rules; mod call_executor; mod client; pub mod genesis; -pub mod light; mod wasm_override; mod wasm_substitutes; diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 8d8c54cc25f29..a1ff8da4085c9 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -49,10 +49,9 @@ use sp_runtime::{ pub use self::{ builder::{ build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, - new_full_parts, new_light_parts, spawn_tasks, BuildNetworkParams, KeystoreContainer, - NetworkStarter, NoopRpcExtensionBuilder, RpcExtensionBuilder, SpawnTasksParams, - TFullBackend, TFullCallExecutor, TFullClient, TLightBackend, TLightBackendWithHash, - TLightCallExecutor, TLightClient, TLightClientWithBackend, + new_full_parts, spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, + NoopRpcExtensionBuilder, RpcExtensionBuilder, SpawnTasksParams, TFullBackend, + TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs index 5278c9a13a4d7..772fdcada72ef 100644 --- a/client/service/test/src/client/db.rs +++ b/client/service/test/src/client/db.rs @@ -21,6 
+21,7 @@ use std::sync::Arc; type TestBackend = sc_client_api::in_mem::Backend; + #[test] fn test_leaves_with_complex_block_tree() { let backend = Arc::new(TestBackend::new()); diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs deleted file mode 100644 index fb9566d208f76..0000000000000 --- a/client/service/test/src/client/light.rs +++ /dev/null @@ -1,981 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::prepare_client_with_key_changes; -use parity_scale_codec::{Decode, Encode}; -use parking_lot::Mutex; -use sc_block_builder::BlockBuilderProvider; -use sc_client_api::{ - backend::NewBlockState, - blockchain::Info, - cht, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, - AuxStore, Backend as ClientBackend, BlockBackend, BlockImportOperation, CallExecutor, - ChangesProof, ExecutionStrategy, FetchChecker, ProofProvider, ProvideChtRoots, - RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, - RemoteReadChildRequest, RemoteReadRequest, Storage, StorageProof, StorageProvider, -}; -use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, WasmExecutionMethod}; -use sc_light::{ - backend::{Backend, GenesisOrUnavailableState}, - blockchain::{Blockchain, BlockchainCache}, - call_executor::{check_execution_proof, GenesisCallExecutor}, - fetcher::LightDataChecker, -}; -use sp_api::{ProofRecorder, StorageTransactionCache}; -use sp_blockchain::{ - well_known_cache_keys, BlockStatus, CachedHeaderMetadata, Error as ClientError, HeaderBackend, - Result as ClientResult, -}; -use sp_consensus::BlockOrigin; -use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256}; -use sp_externalities::Extensions; -use sp_runtime::{ - generic::BlockId, - traits::{BlakeTwo256, Block as _, Header as HeaderT, NumberFor}, - Digest, Justifications, -}; -use sp_state_machine::{ExecutionManager, OverlayedChanges}; -use std::{cell::RefCell, collections::HashMap, panic::UnwindSafe, sync::Arc}; -use substrate_test_runtime_client::{ - runtime::{self, Block, Extrinsic, Hash, Header}, - AccountKeyring, ClientBlockImportExt, TestClient, -}; - -use sp_core::{ - blake2_256, - storage::{well_known_keys, ChildInfo, StorageKey}, - ChangesTrieConfiguration, -}; -use sp_state_machine::Backend as _; - -pub type DummyBlockchain = Blockchain; - -pub struct DummyStorage { - pub changes_tries_cht_roots: HashMap, - pub aux_store: Mutex, Vec>>, -} - -impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - changes_tries_cht_roots: HashMap::new(), - aux_store: Mutex::new(HashMap::new()), - } - } -} - -impl sp_blockchain::HeaderBackend for DummyStorage { - fn header(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn info(&self) -> Info { - 
panic!("Test error") - } - - fn status(&self, _id: BlockId) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn number(&self, hash: Hash) -> ClientResult>> { - if hash == Default::default() { - Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } - - fn hash(&self, number: u64) -> ClientResult> { - if number == 0 { - Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } -} - -impl sp_blockchain::HeaderMetadata for DummyStorage { - type Error = ClientError; - - fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))? - .map(|header| CachedHeaderMetadata::from(&header)) - .ok_or(ClientError::UnknownBlock("header not found".to_owned())) - } - fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} - fn remove_header_metadata(&self, _hash: Hash) {} -} - -impl AuxStore for DummyStorage { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - _delete: D, - ) -> ClientResult<()> { - for (k, v) in insert.into_iter() { - self.aux_store.lock().insert(k.to_vec(), v.to_vec()); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.aux_store.lock().get(key).cloned()) - } -} - -impl Storage for DummyStorage { - fn import_header( - &self, - _header: Header, - _cache: HashMap>, - _state: NewBlockState, - _aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - Ok(()) - } - - fn set_head(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn last_finalized(&self) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } -} - -impl ProvideChtRoots for DummyStorage { - fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { - cht::block_to_cht_number(cht_size, block) - .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) - .cloned() - .ok_or_else(|| { - ClientError::Backend(format!("Test error: CHT for block #{} not found", block)) - .into() - }) - .map(Some) - } -} - -struct DummyCallExecutor; - -impl CallExecutor for DummyCallExecutor { - type Error = ClientError; - - type Backend = substrate_test_runtime_client::Backend; - - fn call( - &self, - _id: &BlockId, - _method: &str, - _call_data: &[u8], - _strategy: ExecutionStrategy, - _extensions: Option, - ) -> Result, ClientError> { - Ok(vec![42]) - } - - fn contextual_call< - EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, - >( - &self, - _at: &BlockId, - _method: &str, - _call_data: &[u8], - _changes: &RefCell, - _storage_transaction_cache: Option< - &RefCell< - StorageTransactionCache< - Block, - >::State, - >, - >, - >, - _execution_manager: ExecutionManager, - _native_call: Option, - _proof_recorder: &Option>, - _extensions: Option, - ) -> ClientResult> - where - ExecutionManager: Clone, - { - unreachable!() - } - - fn runtime_version(&self, _id: &BlockId) -> Result { - unreachable!() - } - - fn prove_execution( - &self, - _: &BlockId, - _: &str, - _: 
&[u8], - ) -> Result<(Vec, StorageProof), ClientError> { - unreachable!() - } -} - -fn local_executor() -> NativeElseWasmExecutor -{ - NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) -} - -#[test] -fn local_state_is_created_when_genesis_state_is_available() { - let def = Default::default(); - let header0 = - substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - - let backend: Backend<_, BlakeTwo256> = - Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header0, None, None, None, NewBlockState::Final).unwrap(); - op.set_genesis_state(Default::default(), true).unwrap(); - backend.commit_operation(op).unwrap(); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Genesis(_) => (), - _ => panic!("unexpected state"), - } -} - -#[test] -fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = - Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Unavailable => (), - _ => panic!("unexpected state"), - } -} - -#[test] -fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = ClientBackend::::begin_operation(&backend).unwrap(); - BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); - ClientBackend::::commit_operation(&backend, op).unwrap(); - - assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); -} - -#[test] -fn execution_proof_is_generated_and_checked() { - fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = - remote_client.execution_proof(&remote_block_id, method, &[]).unwrap(); - - // check remote execution proof locally - let local_result = check_execution_proof::<_, _, BlakeTwo256>( - &local_executor(), - Box::new(TaskExecutor::new()), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], - retry_count: None, - }, - remote_execution_proof, - ) - .unwrap(); - - (remote_result, local_result) - } - - fn execute_with_proof_failure(remote_client: &TestClient, at: u64) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (_, remote_execution_proof) = remote_client - .execution_proof( - &remote_block_id, - "Core_initialize_block", - &Header::new( - at, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - .encode(), - ) - .unwrap(); - - // check remote execution proof locally - let execution_result = check_execution_proof::<_, _, BlakeTwo256>( - &local_executor(), - Box::new(TaskExecutor::new()), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header.clone(), - method: "Core_initialize_block".into(), - call_data: Header::new( - at + 1, - Default::default(), - Default::default(), - remote_header.hash(), - remote_header.digest().clone(), // this 
makes next header wrong - ) - .encode(), - retry_count: None, - }, - remote_execution_proof, - ); - match execution_result { - Err(sp_blockchain::Error::Execution(_)) => (), - _ => panic!("Unexpected execution result: {:?}", execution_result), - } - } - - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - for i in 1u32..3u32 { - let mut digest = Digest::default(); - digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); - futures::executor::block_on(remote_client.import_justified( - BlockOrigin::Own, - remote_client.new_block(digest).unwrap().build().unwrap().block, - Justifications::from((*b"TEST", Default::default())), - )) - .unwrap(); - } - - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version"); - assert_eq!(remote, local); - - let (remote, local) = execute(&remote_client, 2, "Core_version"); - assert_eq!(remote, local); - - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2); - - // check that proof check doesn't panic even if proof is incorrect AND panic handler is set - sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2); -} - -#[test] -fn code_is_executed_at_genesis_only() { - let backend = Arc::new(InMemBackend::::new()); - let def = H256::default(); - let header0 = - substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let hash0 = header0.hash(); - let header1 = - substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); - let hash1 = header1.hash(); - backend - .blockchain() - .insert(hash0, header0, None, None, NewBlockState::Final) - .unwrap(); - backend - .blockchain() - .insert(hash1, header1, None, None, NewBlockState::Final) - .unwrap(); - - let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); - assert_eq!( - genesis_executor - .call(&BlockId::Number(0), "test_method", &[], ExecutionStrategy::NativeElseWasm, None,) - .unwrap(), - vec![42], - ); - - let call_on_unavailable = genesis_executor.call( - &BlockId::Number(1), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ); - - match call_on_unavailable { - Err(ClientError::NotAvailableOnLightClient) => (), - _ => unreachable!("unexpected result: {:?}", call_on_unavailable), - } -} - -type TestChecker = LightDataChecker< - NativeElseWasmExecutor, - Block, - DummyStorage, ->; - -fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { - // prepare remote client - let remote_client = substrate_test_runtime_client::new(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client - .state_at(&remote_block_id) - .unwrap() - .storage_root(::std::iter::empty()) - .0 - .into(); - - // 'fetch' read proof from remote node - let heap_pages = remote_client - .storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) - .unwrap() - .and_then(|v| Decode::decode(&mut &v.0[..]).ok()) - .unwrap(); - let remote_read_proof = remote_client - .read_proof(&remote_block_id, &mut std::iter::once(well_known_keys::HEAP_PAGES)) - .unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - local_storage - 
.insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final) - .unwrap(); - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - (local_checker, remote_block_header, remote_read_proof, heap_pages) -} - -fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; - let child_info = ChildInfo::new_default(b"child1"); - let child_info = &child_info; - // prepare remote client - let remote_client = substrate_test_runtime_client::TestClientBuilder::new() - .add_extra_child_storage(child_info, b"key1".to_vec(), b"value1".to_vec()) - .build(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client - .state_at(&remote_block_id) - .unwrap() - .storage_root(::std::iter::empty()) - .0 - .into(); - - // 'fetch' child read proof from remote node - let child_value = remote_client - .child_storage(&remote_block_id, child_info, &StorageKey(b"key1".to_vec())) - .unwrap() - .unwrap() - .0; - assert_eq!(b"value1"[..], child_value[..]); - let remote_read_proof = remote_client - .read_child_proof(&remote_block_id, child_info, &mut std::iter::once("key1".as_bytes())) - .unwrap(); - - // check locally - let local_storage = InMemoryBlockchain::::new(); - local_storage - .insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final) - .unwrap(); - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - (local_checker, remote_block_header, remote_read_proof, child_value) -} - -fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - let mut local_headers_hashes = Vec::new(); - for i in 0..4 { - let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; - futures::executor::block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); - local_headers_hashes.push( - remote_client - .block_hash(i + 1) - .map_err(|_| ClientError::Backend("TestError".into())), - ); - } - - // 'fetch' header proof from remote node - let remote_block_id = BlockId::Number(1); - let (remote_block_header, remote_header_proof) = - remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = - cht::compute_root::(4, 0, local_headers_hashes).unwrap(); - if insert_cht { - local_storage.insert_cht_root(1, local_cht_root); - } - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - (local_checker, local_cht_root, remote_block_header, remote_header_proof) -} - -fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { - use sp_trie::{trie_types::Layout, TrieConfiguration}; - let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); - - // only care about `extrinsics_root` - Header::new(0, extrinsics_root, H256::zero(), 
H256::zero(), Default::default()) -} - -#[test] -fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, heap_pages) = - prepare_for_read_proof_check(); - assert_eq!( - (&local_checker as &dyn FetchChecker) - .check_read_proof( - &RemoteReadRequest::
<Block> {
-					block: remote_block_header.hash(),
-					header: remote_block_header,
-					keys: vec![well_known_keys::HEAP_PAGES.to_vec()],
-					retry_count: None,
-				},
-				remote_read_proof
-			)
-			.unwrap()
-			.remove(well_known_keys::HEAP_PAGES)
-			.unwrap()
-			.unwrap()[0],
-		heap_pages as u8
-	);
-}
-
-#[test]
-fn storage_child_read_proof_is_generated_and_checked() {
-	let child_info = ChildInfo::new_default(&b"child1"[..]);
-	let (local_checker, remote_block_header, remote_read_proof, result) =
-		prepare_for_read_child_proof_check();
-	assert_eq!(
-		(&local_checker as &dyn FetchChecker<Block>)
-			.check_read_child_proof(
-				&RemoteReadChildRequest::<Block> {
-					block: remote_block_header.hash(),
-					header: remote_block_header,
-					storage_key: child_info.prefixed_storage_key(),
-					keys: vec![b"key1".to_vec()],
-					retry_count: None,
-				},
-				remote_read_proof
-			)
-			.unwrap()
-			.remove(b"key1".as_ref())
-			.unwrap()
-			.unwrap(),
-		result
-	);
-}
-
-#[test]
-fn header_proof_is_generated_and_checked() {
-	let (local_checker, local_cht_root, remote_block_header, remote_header_proof) =
-		prepare_for_header_proof_check(true);
-	assert_eq!(
-		(&local_checker as &dyn FetchChecker<Block>)
-			.check_header_proof(
-				&RemoteHeaderRequest::<Block> {
-					cht_root: local_cht_root,
-					block: 1,
-					retry_count: None,
-				},
-				Some(remote_block_header.clone()),
-				remote_header_proof
-			)
-			.unwrap(),
-		remote_block_header
-	);
-}
-
-#[test]
-fn check_header_proof_fails_if_cht_root_is_invalid() {
-	let (local_checker, _, mut remote_block_header, remote_header_proof) =
-		prepare_for_header_proof_check(true);
-	remote_block_header.number = 100;
-	assert!((&local_checker as &dyn FetchChecker<Block>)
-		.check_header_proof(
-			&RemoteHeaderRequest::<Block> {
-				cht_root: Default::default(),
-				block: 1,
-				retry_count: None,
-			},
-			Some(remote_block_header.clone()),
-			remote_header_proof
-		)
-		.is_err());
-}
-
-#[test]
-fn check_header_proof_fails_if_invalid_header_provided() {
-	let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) =
-		prepare_for_header_proof_check(true);
-	remote_block_header.number = 100;
-	assert!((&local_checker as &dyn FetchChecker<Block>)
-		.check_header_proof(
-			&RemoteHeaderRequest::<Block> {
-				cht_root: local_cht_root,
-				block: 1,
-				retry_count: None,
-			},
-			Some(remote_block_header.clone()),
-			remote_header_proof
-		)
-		.is_err());
-}
-
-#[test]
-fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() {
-	let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes();
-	let local_checker = TestChecker::new(
-		Arc::new(DummyBlockchain::new(DummyStorage::new())),
-		local_executor(),
-		Box::new(TaskExecutor::new()),
-	);
-	let local_checker = &local_checker as &dyn FetchChecker<Block>;
-	let max = remote_client.chain_info().best_number;
-	let max_hash = remote_client.chain_info().best_hash;
-
-	for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() {
-		let begin_hash = remote_client.block_hash(begin).unwrap().unwrap();
-		let end_hash = remote_client.block_hash(end).unwrap().unwrap();
-
-		// 'fetch' changes proof from remote node
-		let key = StorageKey(key);
-		let remote_proof = remote_client
-			.key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key)
-			.unwrap();
-
-		// check proof on local client
-		let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec();
-		let config = ChangesTrieConfiguration::new(4, 2);
-		let request = RemoteChangesRequest::<Block>
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range), - key: key.0, - storage_key: None, - retry_count: None, - }; - let local_result = local_checker - .check_changes_proof( - &request, - ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, - ) - .unwrap(); - - // ..and ensure that result is the same as on remote node - if local_result != expected_result { - panic!( - "Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result, - ); - } - } -} - -#[test] -fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client - .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) - .unwrap(); - - // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::( - 4, - 0, - remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), - ) - .unwrap(); - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - Box::new(TaskExecutor::new()), - ); - - // check proof on local client - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (1, b1), - last_block: (4, b4), - max_block: (4, b4), - tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), - storage_key: None, - key: dave.0, - retry_count: None, - }; - let local_result = local_checker - .check_changes_proof_with_cht_size( - &request, - ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, - 4, - ) - .unwrap(); - - assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); -} - -#[test] -fn check_changes_proof_fails_if_proof_is_wrong() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - let local_checker = &local_checker as &dyn FetchChecker; - let max = remote_client.chain_info().best_number; - let max_hash = remote_client.chain_info().best_hash; - - let (begin, end, key, _) = test_cases[0].clone(); - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client - .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key) - .unwrap(); - - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range.clone()), - storage_key: None, - key: key.0, - retry_count: None, - }; - - // check proof on local client using max from the future - assert!(local_checker - .check_changes_proof( - &request, - ChangesProof { - max_block: remote_proof.max_block + 1, - proof: remote_proof.proof.clone(), - roots: remote_proof.roots.clone(), - roots_proof: remote_proof.roots_proof.clone(), - } - ) - .is_err()); - - // check proof on local client using broken proof - assert!(local_checker - .check_changes_proof( - &request, - ChangesProof { - max_block: remote_proof.max_block, - proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - } - ) - .is_err()); - - // extra roots proofs are provided - assert!(local_checker - .check_changes_proof( - &request, - ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(begin - 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - } - ) - .is_err()); - assert!(local_checker - .check_changes_proof( - &request, - ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(end + 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - } - ) - .is_err()); -} - -#[test] -fn check_changes_tries_proof_fails_if_proof_is_wrong() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let local_cht_root = cht::compute_root::( - 4, - 0, - remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), - ) - .unwrap(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client - .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) - .unwrap(); - - // fails when changes trie CHT is missing from the local db - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - assert!(local_checker - .check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()) - .is_err()); - - // fails when proof is broken - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - Box::new(TaskExecutor::new()), - ); - let result = - local_checker.check_changes_tries_proof(4, &remote_proof.roots, StorageProof::empty()); - assert!(result.is_err()); -} - -#[test] -fn check_body_proof_faulty() { - let header = - header_with_computed_extrinsics_root(vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])]); - let block = Block::new(header.clone(), Vec::new()); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - - let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; - - assert!( - local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), - "vec![1, 2, 3, 4] != vec![]" - ); -} - -#[test] -fn check_body_proof_of_same_data_should_succeed() { - let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; - - let header = header_with_computed_extrinsics_root(extrinsics.clone()); - let block = Block::new(header.clone(), extrinsics); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - Box::new(TaskExecutor::new()), - ); - - let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; - - assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); -} diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 8ea605c0ea5be..33cbefbb06a95 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -57,9 +57,6 @@ use substrate_test_runtime_client::{ Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; -mod db; -mod light; - const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; pub struct ExecutorDispatch; diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index a4e740aabc18e..c44a5cdb97431 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -33,8 +33,9 @@ use sc_service::{ SpawnTaskHandle, TaskManager, TransactionStorageMode, }; use sc_transaction_pool_api::TransactionPool; +use sp_api::BlockId; use sp_blockchain::HeaderBackend; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, task::Context, time::Duration}; use tempfile::TempDir; use tokio::{runtime::Runtime, time}; @@ -45,22 +46,20 @@ mod client; /// Maximum duration of single wait call. 
const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); -struct TestNet { +struct TestNet { runtime: Runtime, authority_nodes: Vec<(usize, F, U, Multiaddr)>, full_nodes: Vec<(usize, F, U, Multiaddr)>, - light_nodes: Vec<(usize, L, Multiaddr)>, chain_spec: GenericChainSpec, base_port: u16, nodes: usize, } -impl Drop for TestNet { +impl Drop for TestNet { fn drop(&mut self) { // Drop the nodes before dropping the runtime, as the runtime otherwise waits for all // futures to be ended and we run into a dead lock. self.full_nodes.drain(..); - self.light_nodes.drain(..); self.authority_nodes.drain(..); } } @@ -156,39 +155,26 @@ where } } -impl TestNet +impl TestNet where F: Clone + Send + 'static, - L: Clone + Send + 'static, U: Clone + Send + 'static, { - pub fn run_until_all_full(&mut self, full_predicate: FP, light_predicate: LP) + pub fn run_until_all_full(&mut self, full_predicate: FP) where FP: Send + Fn(usize, &F) -> bool + 'static, - LP: Send + Fn(usize, &L) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); - let light_nodes = self.light_nodes.clone(); let future = async move { let mut interval = time::interval(Duration::from_millis(100)); - loop { interval.tick().await; - let full_ready = full_nodes + if full_nodes .iter() - .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)); - - if !full_ready { - continue - } - - let light_ready = light_nodes - .iter() - .all(|&(ref id, ref service, _)| light_predicate(*id, service)); - - if light_ready { - return + .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)) + { + break } } }; @@ -278,10 +264,9 @@ fn node_config< } } -impl TestNet +impl TestNet where F: TestNetNode, - L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, G: RuntimeGenesis + 'static, { @@ -289,10 +274,9 @@ where temp: &TempDir, spec: GenericChainSpec, full: impl Iterator Result<(F, U), Error>>, - light: impl Iterator Result>, authorities: impl Iterator Result<(F, U), Error>)>, base_port: u16, - ) -> TestNet { + ) -> TestNet { sp_tracing::try_init_simple(); fdlimit::raise_fd_limit(); let runtime = Runtime::new().expect("Error creating tokio runtime"); @@ -300,12 +284,11 @@ where runtime, authority_nodes: Default::default(), full_nodes: Default::default(), - light_nodes: Default::default(), chain_spec: spec, base_port, nodes: 0, }; - net.insert_nodes(temp, full, light, authorities); + net.insert_nodes(temp, full, authorities); net } @@ -313,7 +296,6 @@ where &mut self, temp: &TempDir, full: impl Iterator Result<(F, U), Error>>, - light: impl Iterator Result>, authorities: impl Iterator Result<(F, U), Error>)>, ) { let handle = self.runtime.handle().clone(); @@ -358,26 +340,6 @@ where self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } - - for light in light { - let node_config = node_config( - self.nodes, - &self.chain_spec, - Role::Light, - handle.clone(), - None, - self.base_port, - &temp, - ); - let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = light(node_config).expect("Error creating test node service"); - - handle.spawn(service.clone().map_err(|_| ())); - let addr = addr - .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); - self.light_nodes.push((self.nodes, service, addr)); - self.nodes += 1; - } } } @@ -388,23 +350,16 @@ fn tempdir_with_prefix(prefix: &str) -> TempDir { .expect("Error creating test dir") } -pub fn connectivity( - spec: GenericChainSpec, - full_builder: Fb, - light_builder: Lb, -) where 
+pub fn connectivity(spec: GenericChainSpec, full_builder: Fb) +where E: ChainSpecExtension + Clone + 'static + Send + Sync, G: RuntimeGenesis + 'static, Fb: Fn(Configuration) -> Result, F: TestNetNode, - Lb: Fn(Configuration) -> Result, - L: TestNetNode, { const NUM_FULL_NODES: usize = 5; - const NUM_LIGHT_NODES: usize = 5; - let expected_full_connections = NUM_FULL_NODES - 1 + NUM_LIGHT_NODES; - let expected_light_connections = NUM_FULL_NODES; + let expected_full_connections = NUM_FULL_NODES - 1; { let temp = tempdir_with_prefix("substrate-connectivity-test"); @@ -413,7 +368,6 @@ pub fn connectivity( &temp, spec.clone(), (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), - (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -427,25 +381,12 @@ pub fn connectivity( .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } - for (_, service, _) in network.light_nodes.iter() { - service - .network() - .add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } - network.run_until_all_full( - move |_index, service| { - let connected = service.network().num_connected(); - debug!("Got {}/{} full connections...", connected, expected_full_connections); - connected == expected_full_connections - }, - move |_index, service| { - let connected = service.network().num_connected(); - debug!("Got {}/{} light connections...", connected, expected_light_connections); - connected == expected_light_connections - }, - ); + network.run_until_all_full(move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }); }; temp.close().expect("Error removing temp dir"); @@ -457,7 +398,6 @@ pub fn connectivity( &temp, spec, (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), - (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -465,8 +405,7 @@ pub fn connectivity( ); info!("Checking linked topology"); let mut address = network.full_nodes[0].3.clone(); - let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); - for i in 0..max_nodes { + for i in 0..NUM_FULL_NODES { if i != 0 { if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { service @@ -476,44 +415,26 @@ pub fn connectivity( address = node_id.clone(); } } - - if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service - .network() - .add_reserved_peer(address.to_string()) - .expect("Error adding reserved peer"); - address = node_id.clone(); - } } - network.run_until_all_full( - move |_index, service| { - let connected = service.network().num_connected(); - debug!("Got {}/{} full connections...", connected, expected_full_connections); - connected == expected_full_connections - }, - move |_index, service| { - let connected = service.network().num_connected(); - debug!("Got {}/{} light connections...", connected, expected_light_connections); - connected == expected_light_connections - }, - ); + network.run_until_all_full(move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }); } temp.close().expect("Error removing temp dir"); } } -pub fn sync( +pub fn sync( spec: GenericChainSpec, full_builder: Fb, - light_builder: Lb, mut make_block_and_import: B, mut extrinsic_factory: ExF, ) where Fb: Fn(Configuration) -> Result<(F, U), Error>, F: TestNetNode, - Lb: Fn(Configuration) -> Result, - L: TestNetNode, B: FnMut(&F, &mut U), ExF: FnMut(&F, &U) -> ::Extrinsic, U: Clone + Send + 'static, @@ -521,15 +442,12 @@ pub fn sync( G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; - // FIXME: BABE light client support is currently not working. - const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 512; let temp = tempdir_with_prefix("substrate-sync-test"); let mut network = TestNet::new( &temp, spec, (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg)), - (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), @@ -560,16 +478,10 @@ pub fn sync( .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } - for (_, service, _) in network.light_nodes.iter() { - service - .network() - .add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } - network.run_until_all_full( - |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), - |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), - ); + + network.run_until_all_full(|_index, service| { + service.client().info().best_number == (NUM_BLOCKS as u32).into() + }); info!("Checking extrinsic propagation"); let first_service = network.full_nodes[0].1.clone(); @@ -585,34 +497,26 @@ pub fn sync( )) .expect("failed to submit extrinsic"); - network.run_until_all_full( - |_index, service| service.transaction_pool().ready().count() == 1, - |_index, _service| true, - ); + network.run_until_all_full(|_index, service| service.transaction_pool().ready().count() == 1); } -pub fn consensus( +pub fn consensus( spec: GenericChainSpec, full_builder: Fb, - light_builder: Lb, authorities: impl IntoIterator, ) where Fb: Fn(Configuration) -> Result, F: TestNetNode, - Lb: Fn(Configuration) -> Result, - L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; - const NUM_LIGHT_NODES: usize = 10; const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds let temp = tempdir_with_prefix("substrate-consensus-test"); let mut network = TestNet::new( &temp, spec, (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), - (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), authorities .into_iter() .map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -627,30 +531,20 @@ pub fn consensus( .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } - for (_, service, _) in network.light_nodes.iter() { - service - .network() - .add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service .network() .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } - network.run_until_all_full( - |_index, service| { - service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into() - }, - |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), - ); + network.run_until_all_full(|_index, service| { + service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into() + }); info!("Adding more peers"); network.insert_nodes( &temp, (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), - (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -661,14 +555,8 @@ pub fn consensus( .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } - for (_, service, _) in network.light_nodes.iter() { - service - .network() - .add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } - network.run_until_all_full( - |_index, service| service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), - |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32).into(), - ); + + network.run_until_all_full(|_index, service| { + service.client().info().finalized_number >= (NUM_BLOCKS as u32).into() + }); } From 040ab4027e40fdeb607a31cf4d30eefd10f1aa0f Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 30 Oct 2021 15:02:09 +0200 Subject: [PATCH 024/162] Add Baseline FRAME Benchmarks (#9691) * create and add baseline * fix import * try a different name * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * increase repeats * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update baseline.rs * Update baseline.rs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- 
benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * improve hash benchmark * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_benchmarking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/benchmarking/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot --- bin/node-template/runtime/src/lib.rs | 10 +- bin/node/runtime/src/lib.rs | 9 +- frame/benchmarking/src/baseline.rs | 173 +++++++++++++++++++++++++++ frame/benchmarking/src/lib.rs | 2 + frame/benchmarking/src/weights.rs | 122 +++++++++++++++++++ 5 files changed, 312 insertions(+), 4 deletions(-) create mode 100644 frame/benchmarking/src/baseline.rs create mode 100644 frame/benchmarking/src/weights.rs diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 4b49cb48ef352..dbea698002c60 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -463,12 +463,14 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_benchmarking::{list_benchmark, baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; let mut list = Vec::::new(); + list_benchmark!(list, extra, frame_benchmarking, BaselineBench::); list_benchmark!(list, extra, frame_system, SystemBench::); list_benchmark!(list, extra, pallet_balances, Balances); list_benchmark!(list, extra, pallet_timestamp, Timestamp); @@ -482,10 +484,13 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; + impl frame_system_benchmarking::Config for Runtime {} + impl baseline::Config for Runtime {} let whitelist: Vec = vec![ // Block Number @@ -503,6 +508,7 @@ impl_runtime_apis! 
{ let mut batches = Vec::::new(); let params = (&config, &whitelist); + add_benchmark!(params, batches, frame_benchmarking, BaselineBench::); add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_balances, Balances); add_benchmark!(params, batches, pallet_timestamp, Timestamp); diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0638e62faa362..9154ef6ca53df 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1609,7 +1609,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_benchmarking::{list_benchmark, baseline, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency @@ -1618,9 +1618,11 @@ impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; let mut list = Vec::::new(); + list_benchmark!(list, extra, frame_benchmarking, BaselineBench::); list_benchmark!(list, extra, pallet_assets, Assets); list_benchmark!(list, extra, pallet_babe, Babe); list_benchmark!(list, extra, pallet_bags_list, BagsList); @@ -1662,7 +1664,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency // issues. To get around that, we separated the Session benchmarks into its own crate, @@ -1670,10 +1672,12 @@ impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use frame_system_benchmarking::Pallet as SystemBench; + use baseline::Pallet as BaselineBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl frame_system_benchmarking::Config for Runtime {} + impl baseline::Config for Runtime {} let whitelist: Vec = vec![ // Block Number @@ -1695,6 +1699,7 @@ impl_runtime_apis! { let mut batches = Vec::::new(); let params = (&config, &whitelist); + add_benchmark!(params, batches, frame_benchmarking, BaselineBench::); add_benchmark!(params, batches, pallet_assets, Assets); add_benchmark!(params, batches, pallet_babe, Babe); add_benchmark!(params, batches, pallet_balances, Balances); diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs new file mode 100644 index 0000000000000..a2ffca60c5cf1 --- /dev/null +++ b/frame/benchmarking/src/baseline.rs @@ -0,0 +1,173 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! A set of benchmarks which can establish a global baseline for all other +//! benchmarking. + +use crate::benchmarks; +use codec::Encode; +use frame_system::Pallet as System; +use sp_runtime::traits::Hash; +use sp_std::prelude::*; + +pub struct Pallet(System); +pub trait Config: frame_system::Config {} + +benchmarks! { + addition { + let i in 0 .. 1_000_000; + let mut start = 0; + }: { + (0..i).for_each(|_| start += 1); + } verify { + assert_eq!(start, i); + } + + subtraction { + let i in 0 .. 1_000_000; + let mut start = u32::MAX; + }: { + (0..i).for_each(|_| start -= 1); + } verify { + assert_eq!(start, u32::MAX - i); + } + + multiplication { + let i in 0 .. 1_000_000; + let mut out = 0; + }: { + (1..=i).for_each(|j| out = 2 * j); + } verify { + assert_eq!(out, 2 * i); + } + + division { + let i in 0 .. 1_000_000; + let mut out = 0; + }: { + (0..=i).for_each(|j| out = j / 2); + } verify { + assert_eq!(out, i / 2); + } + + hashing { + let i in 0 .. 100; + let mut hash = T::Hash::default(); + }: { + (0..=100_000u32).for_each(|j| hash = T::Hashing::hash(&j.to_be_bytes())); + } verify { + assert!(hash != T::Hash::default()); + } + + #[skip_meta] + storage_read { + let i in 0 .. 1_000; + let mut people = Vec::new(); + (0..i).for_each(|j| { + let hash = T::Hashing::hash(&j.to_be_bytes()).encode(); + frame_support::storage::unhashed::put(&hash, &hash); + people.push(hash); + }); + }: { + people.iter().for_each(|hash| { + // This does a storage read + let value = frame_support::storage::unhashed::get(hash); + assert_eq!(value, Some(hash.to_vec())); + }); + } + + #[skip_meta] + storage_write { + let i in 0 .. 1_000; + let mut hashes = Vec::new(); + (0..i).for_each(|j| { + let hash = T::Hashing::hash(&j.to_be_bytes()); + hashes.push(hash.encode()); + }); + }: { + hashes.iter().for_each(|hash| { + // This does a storage write + frame_support::storage::unhashed::put(hash, hash); + }); + } verify { + hashes.iter().for_each(|hash| { + let value = frame_support::storage::unhashed::get(hash); + assert_eq!(value, Some(hash.to_vec())); + }); + } + + impl_benchmark_test_suite!( + Pallet, + crate::baseline::mock::new_test_ext(), + crate::baseline::mock::Test, + ); +} + +#[cfg(test)] +pub mod mock { + use sp_runtime::{testing::H256, traits::IdentityLookup}; + + type AccountId = u64; + type AccountIndex = u32; + type BlockNumber = u64; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + } + ); + + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = sp_runtime::testing::Header; + type Event = Event; + type BlockHashCount = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + } + + impl 
super::Config for Test {} + + pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + sp_io::TestExternalities::new(t) + } +} diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 1805424426f6e..258b40cbe6f0b 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -27,6 +27,8 @@ mod tests; mod tests_instance; mod utils; +pub mod baseline; + #[cfg(feature = "std")] pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector, RegressionModel}; #[doc(hidden)] diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs new file mode 100644 index 0000000000000..807ff697fdcaa --- /dev/null +++ b/frame/benchmarking/src/weights.rs @@ -0,0 +1,122 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for frame_benchmarking +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-10-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=frame_benchmarking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/benchmarking/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for frame_benchmarking. +pub trait WeightInfo { + fn addition(i: u32, ) -> Weight; + fn subtraction(i: u32, ) -> Weight; + fn multiplication(i: u32, ) -> Weight; + fn division(i: u32, ) -> Weight; + fn hashing(i: u32, ) -> Weight; + fn storage_read(i: u32, ) -> Weight; + fn storage_write(i: u32, ) -> Weight; +} + +/// Weights for frame_benchmarking using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
+	fn addition(_i: u32, ) -> Weight {
+		(337_000 as Weight)
+	}
+	fn subtraction(_i: u32, ) -> Weight {
+		(343_000 as Weight)
+	}
+	fn multiplication(_i: u32, ) -> Weight {
+		(340_000 as Weight)
+	}
+	fn division(_i: u32, ) -> Weight {
+		(346_000 as Weight)
+	}
+	fn hashing(_i: u32, ) -> Weight {
+		(35_449_143_000 as Weight)
+	}
+	// Storage: Skipped Metadata (r:0 w:0)
+	fn storage_read(i: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 3_000
+			.saturating_add((2_851_000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight)))
+	}
+	// Storage: Skipped Metadata (r:0 w:0)
+	fn storage_write(i: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 0
+			.saturating_add((662_000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	fn addition(_i: u32, ) -> Weight {
+		(337_000 as Weight)
+	}
+	fn subtraction(_i: u32, ) -> Weight {
+		(343_000 as Weight)
+	}
+	fn multiplication(_i: u32, ) -> Weight {
+		(340_000 as Weight)
+	}
+	fn division(_i: u32, ) -> Weight {
+		(346_000 as Weight)
+	}
+	fn hashing(_i: u32, ) -> Weight {
+		(35_449_143_000 as Weight)
+	}
+	// Storage: Skipped Metadata (r:0 w:0)
+	fn storage_read(i: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 3_000
+			.saturating_add((2_851_000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight)))
+	}
+	// Storage: Skipped Metadata (r:0 w:0)
+	fn storage_write(i: u32, ) -> Weight {
+		(0 as Weight)
+			// Standard Error: 0
+			.saturating_add((662_000 as Weight).saturating_mul(i as Weight))
+			.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight)))
+	}
+}

From e11a670b0310130c329ab32a6e16608f5712113d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Sat, 30 Oct 2021 15:09:56 +0200
Subject: [PATCH 025/162] pallet-utility: Fix possible mismatch between native/wasm (#10121)

* pallet-utility: Fix possible mismatch between native/wasm

The `batched_calls_limit` constant value includes the `size_of` of the
runtime `Call`. As we compile the runtime for native/wasm, we need to
align the call size to ensure that it is the same on wasm/native.

This also solves the problem of different metadata outputs for the same
runtime.

* Review feedback

---
 frame/utility/src/lib.rs | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs
index 54de87c4740c8..8712cf74f451b 100644
--- a/frame/utility/src/lib.rs
+++ b/frame/utility/src/lib.rs
@@ -112,12 +112,19 @@ pub mod pallet {
 		ItemCompleted,
 	}
 
+	// Align the call size to 1KB. As we are currently compiling the runtime for native/wasm,
+	// the `size_of` of the `Call` can be different. To ensure that this doesn't lead to
+	// mismatches between native/wasm or to different metadata for the same runtime, we
+	// align the call size. The value is chosen big enough to hopefully never reach it.
+	const CALL_ALIGN: u32 = 1024;
+
 	#[pallet::extra_constants]
 	impl<T: Config> Pallet<T> {
 		/// The limit on the number of batched calls.
fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = core::mem::size_of::<::Call>() as u32; + let call_size = ((sp_std::mem::size_of::<::Call>() as u32 + CALL_ALIGN - + 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -125,6 +132,18 @@ pub mod pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + fn integrity_test() { + // If you hit this error, you need to try to `Box` big dispatchable parameters. + assert!( + sp_std::mem::size_of::<::Call>() as u32 <= CALL_ALIGN, + "Call enum size should be smaller than {} bytes.", + CALL_ALIGN, + ); + } + } + #[pallet::error] pub enum Error { /// Too many calls batched. From 794e9a91927dda5ffa6b29b1771ccdde947af7a2 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Sun, 31 Oct 2021 11:36:26 +0100 Subject: [PATCH 026/162] fix query details (#10107) --- frame/transaction-payment/src/lib.rs | 59 ++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 7 deletions(-) diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 28200bee7054f..59d94a823723b 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -403,7 +403,7 @@ where /// /// All dispatchables must be annotated with weight and will have some fee info. This function /// always returns. - pub fn query_info( + pub fn query_info( unchecked_extrinsic: Extrinsic, len: u32, ) -> RuntimeDispatchInfo> @@ -417,14 +417,20 @@ where // a very very little potential gain in the future. let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = Self::compute_fee(len, &dispatch_info, 0u32.into()); + let partial_fee = if unchecked_extrinsic.is_signed().unwrap_or(false) { + Self::compute_fee(len, &dispatch_info, 0u32.into()) + } else { + // Unsigned extrinsics have no partial fee. + 0u32.into() + }; + let DispatchInfo { weight, class, .. } = dispatch_info; RuntimeDispatchInfo { weight, class, partial_fee } } /// Query the detailed fee of a given `call`. - pub fn query_fee_details( + pub fn query_fee_details( unchecked_extrinsic: Extrinsic, len: u32, ) -> FeeDetails> @@ -432,7 +438,15 @@ where T::Call: Dispatchable, { let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - Self::compute_fee_details(len, &dispatch_info, 0u32.into()) + + let tip = 0u32.into(); + + if unchecked_extrinsic.is_signed().unwrap_or(false) { + Self::compute_fee_details(len, &dispatch_info, tip) + } else { + // Unsigned extrinsics have no inclusion fee. + FeeDetails { inclusion_fee: None, tip } + } } /// Compute the final fee value for a particular transaction. 
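The alignment arithmetic in the pallet-utility patch above rounds the runtime `Call` size up to the next multiple of `CALL_ALIGN`, so that native and wasm builds, whose `size_of` results may differ slightly, report the same aligned size. A minimal, self-contained sketch of that round-up formula (plain Rust, pallet generics elided; the sample sizes are illustrative only):

    /// Round `size` up to the next multiple of `align`, mirroring the
    /// `((call_size + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN` expression
    /// in `batched_calls_limit` above.
    fn align_up(size: u32, align: u32) -> u32 {
        ((size + align - 1) / align) * align
    }

    fn main() {
        const CALL_ALIGN: u32 = 1024;
        // Slightly different native/wasm call sizes land on the same aligned
        // value, so `batched_calls_limit` (and the metadata) agree across targets.
        assert_eq!(align_up(900, CALL_ALIGN), 1024);
        assert_eq!(align_up(1000, CALL_ALIGN), 1024);
        // Only crossing an alignment boundary changes the result.
        assert_eq!(align_up(1025, CALL_ALIGN), 2048);
    }

The `integrity_test` hook added above guards this assumption by asserting that the real call size stays within a single `CALL_ALIGN` unit.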
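The `is_signed` branch introduced in `query_info` and `query_fee_details` above means RPC consumers now see a zero `partial_fee` and no inclusion fee for unsigned extrinsics. A hedged, self-contained sketch of just that branch (the real code dispatches through the pallet's generics and `compute_fee`; `flat_fee` below is a hypothetical stand-in):

    /// Hypothetical stand-in for the pallet's `compute_fee`: a flat base fee
    /// plus a per-byte length fee.
    fn flat_fee(len: u32) -> u64 {
        10 + u64::from(len)
    }

    /// Mirrors the branch above; `is_signed` has the same `Option<bool>` shape
    /// as `sp_runtime::traits::Extrinsic::is_signed`.
    fn partial_fee(is_signed: Option<bool>, len: u32) -> u64 {
        if is_signed.unwrap_or(false) {
            flat_fee(len) // signed: the inclusion fee applies
        } else {
            0 // unsigned: no partial fee is reported
        }
    }

    fn main() {
        assert_eq!(partial_fee(Some(true), 100), 110);
        assert_eq!(partial_fee(None, 100), 0);
    }

The tests updated below exercise exactly this distinction via a signed and an unsigned `TestXt`.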
@@ -1141,20 +1155,24 @@ mod tests { } #[test] - fn query_info_works() { + fn query_info_and_fee_details_works() { let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); let origin = 111111; let extra = (); - let xt = TestXt::new(call, Some((origin, extra))); + let xt = TestXt::new(call.clone(), Some((origin, extra))); let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; + + let unsigned_xt = TestXt::<_, ()>::new(call, None); + let unsigned_xt_info = unsigned_xt.get_dispatch_info(); + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { // all fees should be x1.5 >::put(Multiplier::saturating_from_rational(3, 2)); assert_eq!( - TransactionPayment::query_info(xt, len), + TransactionPayment::query_info(xt.clone(), len), RuntimeDispatchInfo { weight: info.weight, class: info.class, @@ -1163,6 +1181,33 @@ mod tests { + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ }, ); + + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * + 2 * 3 / 2 + }), + tip: 0, + }, + ); + + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); }); } From 1f1fc455d17f3640be562fc4639a6b74da71f655 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Sun, 31 Oct 2021 14:55:10 +0100 Subject: [PATCH 027/162] Automatic pallet parts in construct_runtime (#9681) * implement automatic parts * ui tests * rename * remove unnecessary exclude * better doc * better doc * fix genesis config * fix UI tests * fix UI test * Revert "fix UI test" This reverts commit a910351c0b24cfe42195cfd97d83a416640e3259. * implemented used_parts * Update frame/support/procedural/src/construct_runtime/mod.rs Co-authored-by: Keith Yeung * doc + fmt * Update frame/support/procedural/src/construct_runtime/parse.rs Co-authored-by: Keith Yeung * add doc in the macro * remove yet some more parts * fix ui test * more determnistic error message + fix ui tests * fix ui test * Apply suggestions from code review Co-authored-by: Keith Yeung * do refactor + fix ui tests * fmt * fix test * fix test * fix ui test * Apply suggestions from code review Co-authored-by: Keith Yeung * refactor * remove even more part in node-runtime * fix test * Add flow chart for the construct_runtime! 
execution flow * Fix typo * Ignore snippets that don't contain code * Refactor some code in expand_after * Rename expand_after to match_and_insert * cargo fmt * Fix rename * Remove frame_support argument to construct_runtime_parts * Make use of tt-call to simplify intermediate expansions * cargo fmt * Update match_and_insert documentation * Reset cursor to 0 when no matching patterns are found * Reorder struct fields on MatchAndInsertDef * Add test for dependency renames and fix frame-support import * Add more doc comments * Update frame/support/test/compile_pass/src/lib.rs Co-authored-by: Guillaume Thiolliere Co-authored-by: Keith Yeung Co-authored-by: Shawn Tabrizi --- Cargo.lock | 20 + Cargo.toml | 3 +- bin/node/cli/src/chain_spec.rs | 2 + bin/node/runtime/src/lib.rs | 80 ++-- bin/node/testing/src/genesis.rs | 2 + frame/support/Cargo.toml | 1 + .../procedural/src/construct_runtime/mod.rs | 262 +++++++++----- .../procedural/src/construct_runtime/parse.rs | 341 ++++++++++++++++-- frame/support/procedural/src/lib.rs | 128 +++++-- .../procedural/src/match_and_insert.rs | 159 ++++++++ .../procedural/src/pallet/expand/mod.rs | 3 + .../src/pallet/expand/tt_default_parts.rs | 82 +++++ frame/support/src/lib.rs | 4 +- frame/support/test/compile_pass/Cargo.toml | 30 ++ frame/support/test/compile_pass/src/lib.rs | 92 +++++ .../both_use_and_excluded_parts.rs | 33 ++ .../both_use_and_excluded_parts.stderr | 28 ++ .../construct_runtime_ui/duplicate_exclude.rs | 13 + .../duplicate_exclude.stderr | 5 + .../construct_runtime_ui/exclude_missspell.rs | 13 + .../exclude_missspell.stderr | 5 + .../exclude_undefined_part.rs | 38 ++ .../exclude_undefined_part.stderr | 28 ++ .../invalid_module_details.stderr | 6 +- .../invalid_token_after_module.stderr | 2 +- .../old_unsupported_pallet_decl.rs | 26 ++ .../old_unsupported_pallet_decl.stderr | 31 ++ .../use_undefined_part.rs | 38 ++ .../use_undefined_part.stderr | 28 ++ frame/support/test/tests/pallet.rs | 35 +- frame/support/test/tests/pallet_instance.rs | 15 +- 31 files changed, 1335 insertions(+), 218 deletions(-) create mode 100644 frame/support/procedural/src/match_and_insert.rs create mode 100644 frame/support/procedural/src/pallet/expand/tt_default_parts.rs create mode 100644 frame/support/test/compile_pass/Cargo.toml create mode 100644 frame/support/test/compile_pass/src/lib.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/duplicate_exclude.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/duplicate_exclude.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/exclude_missspell.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/exclude_missspell.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr diff --git a/Cargo.lock b/Cargo.lock index c3cfaec19532e..84ad0c0a563c0 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -2058,6 +2058,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", + "tt-call", ] [[package]] @@ -2113,6 +2114,19 @@ dependencies = [ "trybuild", ] +[[package]] +name = "frame-support-test-compile-pass" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", + "sp-version", +] + [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" @@ -10803,6 +10817,12 @@ dependencies = [ "toml", ] +[[package]] +name = "tt-call" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" + [[package]] name = "twox-hash" version = "1.6.1" diff --git a/Cargo.toml b/Cargo.toml index 71473a4bc5689..4a228203159eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,6 +120,7 @@ members = [ "frame/support/procedural/tools", "frame/support/procedural/tools/derive", "frame/support/test", + "frame/support/test/compile_pass", "frame/system", "frame/system/benchmarking", "frame/system/rpc/runtime-api", @@ -271,4 +272,4 @@ yamux = { opt-level = 3 } zeroize = { opt-level = 3 } [profile.release] # Substrate runtime requires unwinding. -panic = "unwind" \ No newline at end of file +panic = "unwind" diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 8499c66e0c9dc..7b1ed90017c36 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -364,6 +364,8 @@ pub fn testnet_genesis( assets: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), + scheduler: Default::default(), + transaction_payment: Default::default(), } } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 9154ef6ca53df..c0ad9bb006c92 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1248,47 +1248,47 @@ construct_runtime!( NodeBlock = node_primitives::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Utility: pallet_utility::{Pallet, Call, Event}, - Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Authorship: pallet_authorship::{Pallet, Call, Storage, Inherent}, - Indices: pallet_indices::{Pallet, Call, Storage, Config, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Pallet, Call, Storage, Event, ValidateUnsigned}, - Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, - Council: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, - TechnicalCommittee: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config}, - Elections: pallet_elections_phragmen::{Pallet, Call, Storage, Event, Config}, - TechnicalMembership: pallet_membership::::{Pallet, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned}, - Treasury: pallet_treasury::{Pallet, Call, Storage, Config, Event}, - Contracts: pallet_contracts::{Pallet, Call, Storage, Event}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, - ImOnline: pallet_im_online::{Pallet, 
Call, Storage, Event, ValidateUnsigned, Config}, - AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, - Offences: pallet_offences::{Pallet, Storage, Event}, + System: frame_system, + Utility: pallet_utility, + Babe: pallet_babe, + Timestamp: pallet_timestamp, + Authorship: pallet_authorship, + Indices: pallet_indices, + Balances: pallet_balances, + TransactionPayment: pallet_transaction_payment, + ElectionProviderMultiPhase: pallet_election_provider_multi_phase, + Staking: pallet_staking, + Session: pallet_session, + Democracy: pallet_democracy, + Council: pallet_collective::, + TechnicalCommittee: pallet_collective::, + Elections: pallet_elections_phragmen, + TechnicalMembership: pallet_membership::, + Grandpa: pallet_grandpa, + Treasury: pallet_treasury, + Contracts: pallet_contracts, + Sudo: pallet_sudo, + ImOnline: pallet_im_online, + AuthorityDiscovery: pallet_authority_discovery, + Offences: pallet_offences, Historical: pallet_session_historical::{Pallet}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, - Identity: pallet_identity::{Pallet, Call, Storage, Event}, - Society: pallet_society::{Pallet, Call, Storage, Event, Config}, - Recovery: pallet_recovery::{Pallet, Call, Storage, Event}, - Vesting: pallet_vesting::{Pallet, Call, Storage, Event, Config}, - Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event}, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, - Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, - Tips: pallet_tips::{Pallet, Call, Storage, Event}, - Assets: pallet_assets::{Pallet, Call, Storage, Event, Config}, - Mmr: pallet_mmr::{Pallet, Storage}, - Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, - Gilt: pallet_gilt::{Pallet, Call, Storage, Event, Config}, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, - TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, - BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip, + Identity: pallet_identity, + Society: pallet_society, + Recovery: pallet_recovery, + Vesting: pallet_vesting, + Scheduler: pallet_scheduler, + Proxy: pallet_proxy, + Multisig: pallet_multisig, + Bounties: pallet_bounties, + Tips: pallet_tips, + Assets: pallet_assets, + Mmr: pallet_mmr, + Lottery: pallet_lottery, + Gilt: pallet_gilt, + Uniques: pallet_uniques, + TransactionStorage: pallet_transaction_storage, + BagsList: pallet_bags_list, } ); diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 845227c5acee9..80399a6670e86 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -101,5 +101,7 @@ pub fn config_endowed( assets: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), + scheduler: Default::default(), + transaction_payment: Default::default(), } } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index edb0ecd6442e8..4bc64d8b8e73a 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -25,6 +25,7 @@ sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primi sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../primitives/arithmetic" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-staking = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/staking" } +tt-call = "1.0.8" frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" } paste = "1.0" once_cell = { version = "1", default-features = false, optional = true } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 863df34266591..4315d4278183a 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -15,115 +15,205 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Implementation of `construct_runtime`. +//! +//! `construct_runtime` implementation is recursive and can generate code which will call itself in +//! order to get all the pallet parts for each pallet. +//! +//! Pallets define their parts (`Call`, `Storage`, ..) either explicitly with the syntax +//! `::{Call, ...}` or implicitly. +//! +//! In case a pallet defines its parts implicitly, then the pallet must provide the +//! `tt_default_parts` macro. `construct_rutime` will generate some code which utilizes `tt_call` +//! to call the `tt_default_parts` macro of the pallet. `tt_default_parts` will then return the +//! default pallet parts as input tokens to the `match_and_replace` macro, which ultimately +//! generates a call to `construct_runtime` again, this time with all the pallet parts explicitly +//! defined. +//! +//! E.g. +//! ```ignore +//! construct_runtime!( +//! //... +//! { +//! System: frame_system = 0, // Implicit definition of parts +//! Balances: pallet_balances = 1, // Implicit definition of parts +//! } +//! ); +//! ``` +//! This call has some implicit pallet parts, thus it will expand to: +//! ```ignore +//! frame_support::tt_call! { +//! macro = [{ pallet_balances::tt_default_parts }] +//! ~~> frame_support::match_and_insert! { +//! target = [{ +//! frame_support::tt_call! { +//! macro = [{ frame_system::tt_default_parts }] +//! ~~> frame_support::match_and_insert! { +//! target = [{ +//! construct_runtime!( +//! //... +//! { +//! System: frame_system = 0, +//! Balances: pallet_balances = 1, +//! } +//! ); +//! }] +//! pattern = [{ System: frame_system }] +//! } +//! } +//! }] +//! pattern = [{ Balances: pallet_balances }] +//! } +//! } +//! ``` +//! `tt_default_parts` must be defined. It returns the pallet parts inside some tokens, and +//! then `tt_call` will pipe the returned pallet parts into the input of `match_and_insert`. +//! Thus `match_and_insert` will initially receive the following inputs: +//! ```ignore +//! frame_support::match_and_insert! { +//! target = [{ +//! frame_support::match_and_insert! { +//! target = [{ +//! construct_runtime!( +//! //... +//! { +//! System: frame_system = 0, +//! Balances: pallet_balances = 1, +//! } +//! ) +//! }] +//! pattern = [{ System: frame_system }] +//! tokens = [{ ::{Pallet, Call} }] +//! }] +//! pattern = [{ Balances: pallet_balances }] +//! tokens = [{ ::{Pallet, Call} }] +//! } +//! ``` +//! After dealing with `pallet_balances`, the inner `match_and_insert` will expand to: +//! ```ignore +//! frame_support::match_and_insert! { +//! target = [{ +//! construct_runtime!( +//! //... +//! { +//! System: frame_system = 0, // Implicit definition of parts +//! Balances: pallet_balances::{Pallet, Call} = 1, // Explicit definition of parts +//! } +//! ) +//! }] +//! pattern = [{ System: frame_system }] +//! tokens = [{ ::{Pallet, Call} }] +//! } +//! ``` +//! 
Which will then finally expand to the following: +//! ```ignore +//! construct_runtime!( +//! //... +//! { +//! System: frame_system::{Pallet, Call}, +//! Balances: pallet_balances::{Pallet, Call}, +//! } +//! ) +//! ``` +//! This call has no implicit pallet parts, thus it will expand to the runtime construction: +//! ```ignore +//! pub struct Runtime { ... } +//! pub struct Call { ... } +//! impl Call ... +//! pub enum Origin { ... } +//! ... +//! ``` +//! +//! Visualizing the entire flow of `construct_runtime!`, it would look like the following: +//! +//! ```ignore +//! +--------------------+ +---------------------+ +-------------------+ +//! | | | (defined in pallet) | | | +//! | construct_runtime! | --> | tt_default_parts! | --> | match_and_insert! | +//! | w/ no pallet parts | | | | | +//! +--------------------+ +---------------------+ +-------------------+ +//! +//! +--------------------+ +//! | | +//! --> | construct_runtime! | +//! | w/ pallet parts | +//! +--------------------+ +//! ``` + mod expand; mod parse; use frame_support_procedural_tools::{ - generate_crate_access, generate_hidden_includes, syn_ext as ext, + generate_crate_access, generate_crate_access_2018, generate_hidden_includes, +}; +use parse::{ + ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration, + WhereSection, }; -use parse::{PalletDeclaration, PalletPart, PalletPath, RuntimeDefinition, WhereSection}; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use std::collections::HashMap; use syn::{Ident, Result}; /// The fixed name of the system pallet. const SYSTEM_PALLET_NAME: &str = "System"; -/// The complete definition of a pallet with the resulting fixed index. -#[derive(Debug, Clone)] -pub struct Pallet { - pub name: Ident, - pub index: u8, - pub path: PalletPath, - pub instance: Option, - pub pallet_parts: Vec, -} - -impl Pallet { - /// Get resolved pallet parts - fn pallet_parts(&self) -> &[PalletPart] { - &self.pallet_parts - } +/// Implementation of `construct_runtime` macro. Either expand to some code which will call +/// `construct_runtime` again, or expand to the final runtime definition. +pub fn construct_runtime(input: TokenStream) -> TokenStream { + let input_copy = input.clone(); + let definition = syn::parse_macro_input!(input as RuntimeDeclaration); - /// Find matching parts - fn find_part(&self, name: &str) -> Option<&PalletPart> { - self.pallet_parts.iter().find(|part| part.name() == name) - } + let res = match definition { + RuntimeDeclaration::Implicit(implicit_def) => + construct_runtime_intermediary_expansion(input_copy.into(), implicit_def), + RuntimeDeclaration::Explicit(explicit_decl) => + construct_runtime_final_expansion(explicit_decl), + }; - /// Return whether pallet contains part - fn exists_part(&self, name: &str) -> bool { - self.find_part(name).is_some() - } + res.unwrap_or_else(|e| e.to_compile_error()).into() } -/// Convert from the parsed pallet to their final information. -/// Assign index to each pallet using same rules as rust for fieldless enum. -/// I.e. implicit are assigned number incrementedly from last explicit or 0. 
-fn complete_pallets(decl: impl Iterator<Item = PalletDeclaration>) -> syn::Result<Vec<Pallet>> {
-    let mut indices = HashMap::new();
-    let mut last_index: Option<u8> = None;
-    let mut names = HashMap::new();
-
-    decl.map(|pallet| {
-        let final_index = match pallet.index {
-            Some(i) => i,
-            None => last_index.map_or(Some(0), |i| i.checked_add(1)).ok_or_else(|| {
-                let msg = "Pallet index doesn't fit into u8, index is 256";
-                syn::Error::new(pallet.name.span(), msg)
-            })?,
-        };
-
-        last_index = Some(final_index);
-
-        if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) {
-            let msg = format!(
-                "Pallet indices are conflicting: Both pallets {} and {} are at index {}",
-                used_pallet, pallet.name, final_index,
-            );
-            let mut err = syn::Error::new(used_pallet.span(), &msg);
-            err.combine(syn::Error::new(pallet.name.span(), msg));
-            return Err(err)
-        }
-
-        if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) {
-            let msg = "Two pallets with the same name!";
-
-            let mut err = syn::Error::new(used_pallet, &msg);
-            err.combine(syn::Error::new(pallet.name.span(), &msg));
-            return Err(err)
-        }
-
-        Ok(Pallet {
-            name: pallet.name,
-            index: final_index,
-            path: pallet.path,
-            instance: pallet.instance,
-            pallet_parts: pallet.pallet_parts,
-        })
-    })
-    .collect()
-}
+/// When some pallets have an implicit definition of their parts, the macro expands into code
+/// that calls the `tt_default_parts` macro of each such pallet; see the module documentation.
+fn construct_runtime_intermediary_expansion(
+    input: TokenStream2,
+    definition: ImplicitRuntimeDeclaration,
+) -> Result<TokenStream2> {
+    let frame_support = generate_crate_access_2018("frame-support")?;
+    let mut expansion = quote::quote!(
+        #frame_support::construct_runtime! { #input }
+    );
+    for pallet in definition.pallets.iter().filter(|pallet| pallet.pallet_parts.is_none()) {
+        let pallet_path = &pallet.path;
+        let pallet_name = &pallet.name;
+        let pallet_instance = pallet.instance.as_ref().map(|instance| quote::quote!(::<#instance>));
+        expansion = quote::quote!(
+            #frame_support::tt_call! {
+                macro = [{ #pallet_path::tt_default_parts }]
+                frame_support = [{ #frame_support }]
+                ~~> #frame_support::match_and_insert! {
+                    target = [{ #expansion }]
+                    pattern = [{ #pallet_name: #pallet_path #pallet_instance }]
+                }
+            }
+        );
+    }

-pub fn construct_runtime(input: TokenStream) -> TokenStream {
-    let definition = syn::parse_macro_input!(input as RuntimeDefinition);
-    construct_runtime_parsed(definition)
-        .unwrap_or_else(|e| e.to_compile_error())
-        .into()
+    Ok(expansion.into())
 }

-fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result<TokenStream2> {
-    let RuntimeDefinition {
+/// All pallets have an explicit definition of their parts; this expands to the final runtime
+/// declaration.
+fn construct_runtime_final_expansion(
+    definition: ExplicitRuntimeDeclaration,
+) -> Result<TokenStream2> {
+    let ExplicitRuntimeDeclaration {
         name,
-        where_section: WhereSection { block, node_block, unchecked_extrinsic, .. },
-        pallets:
-            ext::Braces { content: ext::Punctuated { inner: pallets, .. }, token: pallets_token },
-        ..
+        where_section: WhereSection { block, node_block, unchecked_extrinsic },
+        pallets,
+        pallets_token,
     } = definition;

-    let pallets = complete_pallets(pallets.into_iter())?;
-
     let hidden_crate_name = "construct_runtime";
     let scrate = generate_crate_access(&hidden_crate_name, "frame-support");
     let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support");
diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs
index a0ec6dfa5803e..f80b7b1ac554c 100644
--- a/frame/support/procedural/src/construct_runtime/parse.rs
+++ b/frame/support/procedural/src/construct_runtime/parse.rs
@@ -17,13 +17,13 @@

 use frame_support_procedural_tools::syn_ext as ext;
 use proc_macro2::{Span, TokenStream};
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use syn::{
     ext::IdentExt,
     parse::{Parse, ParseStream},
     punctuated::Punctuated,
     spanned::Spanned,
-    token, Error, Ident, Path, PathArguments, PathSegment, Result, Token,
+    token, Error, Ident, Path, Result, Token,
 };

 mod keyword {
@@ -38,26 +38,63 @@ mod keyword {
     syn::custom_keyword!(Origin);
     syn::custom_keyword!(Inherent);
     syn::custom_keyword!(ValidateUnsigned);
+    syn::custom_keyword!(exclude_parts);
+    syn::custom_keyword!(use_parts);
 }

+/// Declaration of a runtime.
+///
+/// Pallets declare their parts either explicitly or implicitly (i.e. with no part declaration).
+/// If all pallets declare their parts explicitly then the runtime declaration is explicit,
+/// otherwise it is implicit.
+#[derive(Debug)]
+pub enum RuntimeDeclaration {
+    Implicit(ImplicitRuntimeDeclaration),
+    Explicit(ExplicitRuntimeDeclaration),
+}
+
+/// Declaration of a runtime with at least one pallet whose parts are declared implicitly.
+#[derive(Debug)]
+pub struct ImplicitRuntimeDeclaration {
+    pub name: Ident,
+    pub where_section: WhereSection,
+    pub pallets: Vec<PalletDeclaration>,
+}
+
+/// Declaration of a runtime in which every pallet declares its parts explicitly.
 #[derive(Debug)]
-pub struct RuntimeDefinition {
-    pub visibility_token: Token![pub],
-    pub enum_token: Token![enum],
+pub struct ExplicitRuntimeDeclaration {
     pub name: Ident,
     pub where_section: WhereSection,
-    pub pallets: ext::Braces<ext::Punctuated<PalletDeclaration, Token![,]>>,
+    pub pallets: Vec<Pallet>,
+    pub pallets_token: token::Brace,
 }

-impl Parse for RuntimeDefinition {
+impl Parse for RuntimeDeclaration {
     fn parse(input: ParseStream) -> Result<Self> {
-        Ok(Self {
-            visibility_token: input.parse()?,
-            enum_token: input.parse()?,
-            name: input.parse()?,
-            where_section: input.parse()?,
-            pallets: input.parse()?,
-        })
+        input.parse::<Token![pub]>()?;
+        input.parse::<Token![enum]>()?;
+        let name = input.parse::<syn::Ident>()?;
+        let where_section = input.parse()?;
+        let pallets =
+            input.parse::<ext::Braces<ext::Punctuated<PalletDeclaration, Token![,]>>>()?;
+        let pallets_token = pallets.token;
+
+        match convert_pallets(pallets.content.inner.into_iter().collect())? {
+            PalletsConversion::Implicit(pallets) =>
+                Ok(RuntimeDeclaration::Implicit(ImplicitRuntimeDeclaration {
+                    name,
+                    where_section,
+                    pallets,
+                })),
+            PalletsConversion::Explicit(pallets) =>
+                Ok(RuntimeDeclaration::Explicit(ExplicitRuntimeDeclaration {
+                    name,
+                    where_section,
+                    pallets,
+                    pallets_token,
+                })),
+        }
     }
 }

@@ -136,14 +173,34 @@ impl Parse for WhereDefinition {
     }
 }

+/// The declaration of a pallet.
 #[derive(Debug, Clone)]
 pub struct PalletDeclaration {
+    /// The name of the pallet, e.g. `System` in `System: frame_system`.
     pub name: Ident,
-    /// Optional fixed index (e.g. `MyPallet ... = 3,`)
+    /// Optional fixed index, e.g. `MyPallet ... = 3,`.
     pub index: Option<u8>,
+    /// The path of the pallet, e.g.
`frame_system` in `System: frame_system`.
     pub path: PalletPath,
+    /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::<Instance1>`.
     pub instance: Option<Ident>,
-    pub pallet_parts: Vec<PalletPart>,
+    /// The declared pallet parts,
+    /// e.g. `Some([Pallet, Call])` for `System: system::{Pallet, Call}`
+    /// or `None` for `System: system`.
+    pub pallet_parts: Option<Vec<PalletPart>>,
+    /// The specified parts, either use_parts or exclude_parts.
+    pub specified_parts: SpecifiedParts,
+}
+
+/// The possible declaration of pallet parts to use.
+#[derive(Debug, Clone)]
+pub enum SpecifiedParts {
+    /// Use all the pallet parts except those specified.
+    Exclude(Vec<PalletPartNoGeneric>),
+    /// Use only the specified pallet parts.
+    Use(Vec<PalletPartNoGeneric>),
+    /// Use all the pallet parts.
+    All,
 }

 impl Parse for PalletDeclaration {
@@ -151,38 +208,78 @@ impl Parse for PalletDeclaration {
         let name = input.parse()?;
         let _: Token![:] = input.parse()?;
         let path = input.parse()?;
-        let instance = if input.peek(Token![<]) {
+
+        // Parse for instance.
+        let instance = if input.peek(Token![::]) && input.peek3(Token![<]) {
+            let _: Token![::] = input.parse()?;
             let _: Token![<] = input.parse()?;
             let res = Some(input.parse()?);
             let _: Token![>] = input.parse()?;
-            let _: Token![::] = input.parse()?;
             res
+        } else if !(input.peek(Token![::]) && input.peek3(token::Brace)) &&
+            !input.peek(keyword::exclude_parts) &&
+            !input.peek(keyword::use_parts) &&
+            !input.peek(Token![=]) &&
+            !input.peek(Token![,]) &&
+            !input.is_empty()
+        {
+            return Err(input.error(
+                "Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,`",
+            ))
         } else {
             None
         };

-        let pallet_parts = parse_pallet_parts(input)?;
+        // Parse for explicit parts
+        let pallet_parts = if input.peek(Token![::]) && input.peek3(token::Brace) {
+            let _: Token![::] = input.parse()?;
+            Some(parse_pallet_parts(input)?)
+        } else if !input.peek(keyword::exclude_parts) &&
+            !input.peek(keyword::use_parts) &&
+            !input.peek(Token![=]) &&
+            !input.peek(Token![,]) &&
+            !input.is_empty()
+        {
+            return Err(input.error(
+                "Unexpected tokens, expected one of `::{`, `exclude_parts`, `use_parts`, `=`, `,`",
+            ))
+        } else {
+            None
+        };
+
+        // Parse for specified parts
+        let specified_parts = if input.peek(keyword::exclude_parts) {
+            let _: keyword::exclude_parts = input.parse()?;
+            SpecifiedParts::Exclude(parse_pallet_parts_no_generic(input)?)
+        } else if input.peek(keyword::use_parts) {
+            let _: keyword::use_parts = input.parse()?;
+            SpecifiedParts::Use(parse_pallet_parts_no_generic(input)?)
+        } else if !input.peek(Token![=]) && !input.peek(Token![,]) && !input.is_empty() {
+            return Err(input.error("Unexpected tokens, expected one of `exclude_parts`, `=`, `,`"))
+        } else {
+            SpecifiedParts::All
+        };

+        // Parse for pallet index
         let index = if input.peek(Token![=]) {
             input.parse::<Token![=]>()?;
             let index = input.parse::<syn::LitInt>()?;
             let index = index.base10_parse::<u8>()?;
             Some(index)
+        } else if !input.peek(Token![,]) && !input.is_empty() {
+            return Err(input.error("Unexpected tokens, expected one of `=`, `,`"))
         } else {
             None
         };

-        let parsed = Self { name, path, instance, pallet_parts, index };
-
-        Ok(parsed)
+        Ok(Self { name, path, instance, pallet_parts, specified_parts, index })
     }
 }

 /// A struct representing a path to a pallet.
`PalletPath` is almost identical to the standard
 /// Rust path with a few restrictions:
 /// - No leading colons allowed
-/// - Path segments can only consist of identifers; angle-bracketed or parenthesized segments will
-///   result in a parsing error (except when specifying instances)
+/// - Path segments can only consist of identifiers separated by colons
 #[derive(Debug, Clone)]
 pub struct PalletPath {
     pub inner: Path,
@@ -202,34 +299,27 @@ impl PalletPath {

 impl Parse for PalletPath {
     fn parse(input: ParseStream) -> Result<Self> {
-        let mut lookahead = input.lookahead1();
-        let mut segments = Punctuated::new();
+        let mut res =
+            PalletPath { inner: Path { leading_colon: None, segments: Punctuated::new() } };

+        let lookahead = input.lookahead1();
         if lookahead.peek(Token![crate]) ||
             lookahead.peek(Token![self]) ||
             lookahead.peek(Token![super]) ||
             lookahead.peek(Ident)
         {
             let ident = input.call(Ident::parse_any)?;
-            segments.push(PathSegment { ident, arguments: PathArguments::None });
-            let _: Token![::] = input.parse()?;
-            lookahead = input.lookahead1();
+            res.inner.segments.push(ident.into());
         } else {
             return Err(lookahead.error())
         }

-        while lookahead.peek(Ident) {
-            let ident = input.parse()?;
-            segments.push(PathSegment { ident, arguments: PathArguments::None });
-            let _: Token![::] = input.parse()?;
-            lookahead = input.lookahead1();
-        }
-
-        if !lookahead.peek(token::Brace) && !lookahead.peek(Token![<]) {
-            return Err(lookahead.error())
+        while input.peek(Token![::]) && input.peek3(Ident) {
+            input.parse::<Token![::]>()?;
+            let ident = input.parse::<Ident>()?;
+            res.inner.segments.push(ident.into());
         }
-
-        Ok(Self { inner: Path { leading_colon: None, segments } })
+        Ok(res)
     }
 }

@@ -391,3 +481,174 @@ fn remove_kind(
         Err(input.error(msg))
     }
 }
+
+/// The declaration of a part without its generics
+#[derive(Debug, Clone)]
+pub struct PalletPartNoGeneric {
+    keyword: PalletPartKeyword,
+}
+
+impl Parse for PalletPartNoGeneric {
+    fn parse(input: ParseStream) -> Result<Self> {
+        Ok(Self { keyword: input.parse()? })
+    }
+}
+
+/// Parse [`PalletPartNoGeneric`]'s from a brace-enclosed list split by commas, e.g.
+///
+/// `{ Call, Event }`
+fn parse_pallet_parts_no_generic(input: ParseStream) -> Result<Vec<PalletPartNoGeneric>> {
+    let pallet_parts: ext::Braces<ext::Punctuated<PalletPartNoGeneric, Token![,]>> =
+        input.parse()?;
+
+    let mut resolved = HashSet::new();
+    for part in pallet_parts.content.inner.iter() {
+        if !resolved.insert(part.keyword.name()) {
+            let msg = format!(
+                "`{}` was already declared before. Please remove the duplicate declaration",
+                part.keyword.name(),
+            );
+            return Err(Error::new(part.keyword.span(), msg))
+        }
+    }
+
+    Ok(pallet_parts.content.inner.into_iter().collect())
+}
+
+/// The final definition of a pallet with the resulting fixed index and explicit parts.
+#[derive(Debug, Clone)]
+pub struct Pallet {
+    /// The name of the pallet, e.g. `System` in `System: frame_system`.
+    pub name: Ident,
+    /// Either automatically inferred, or explicitly defined (e.g. `MyPallet ... = 3,`).
+    pub index: u8,
+    /// The path of the pallet, e.g. `frame_system` in `System: frame_system`.
+    pub path: PalletPath,
+    /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::<Instance1>`.
+    pub instance: Option<Ident>,
+    /// The pallet parts to use for the pallet.
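+    /// For pallets that were declared without explicit parts, these have already been filled
+    /// in from the pallet's `tt_default_parts` macro (via the intermediary expansion) by the
+    /// time this struct is built.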
+    pub pallet_parts: Vec<PalletPart>,
+}
+
+impl Pallet {
+    /// Get resolved pallet parts
+    pub fn pallet_parts(&self) -> &[PalletPart] {
+        &self.pallet_parts
+    }
+
+    /// Find matching parts
+    pub fn find_part(&self, name: &str) -> Option<&PalletPart> {
+        self.pallet_parts.iter().find(|part| part.name() == name)
+    }
+
+    /// Return whether pallet contains part
+    pub fn exists_part(&self, name: &str) -> bool {
+        self.find_part(name).is_some()
+    }
+}
+
+/// Result of a conversion of a declaration of pallets.
+enum PalletsConversion {
+    Implicit(Vec<PalletDeclaration>),
+    Explicit(Vec<Pallet>),
+}
+
+/// Convert the parsed pallet declarations into their final information.
+///
+/// Check if all pallets have an explicit declaration of their parts; if so, assign an index to
+/// each pallet using the same rules as Rust uses for fieldless enums, i.e. implicit indices are
+/// assigned incrementally from the last explicit one, or from 0.
+fn convert_pallets(pallets: Vec<PalletDeclaration>) -> syn::Result<PalletsConversion> {
+    if pallets.iter().any(|pallet| pallet.pallet_parts.is_none()) {
+        return Ok(PalletsConversion::Implicit(pallets))
+    }
+
+    let mut indices = HashMap::new();
+    let mut last_index: Option<u8> = None;
+    let mut names = HashMap::new();
+
+    let pallets = pallets
+        .into_iter()
+        .map(|pallet| {
+            let final_index = match pallet.index {
+                Some(i) => i,
+                None => last_index.map_or(Some(0), |i| i.checked_add(1)).ok_or_else(|| {
+                    let msg = "Pallet index doesn't fit into u8, index is 256";
+                    syn::Error::new(pallet.name.span(), msg)
+                })?,
+            };
+
+            last_index = Some(final_index);
+
+            if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) {
+                let msg = format!(
+                    "Pallet indices are conflicting: Both pallets {} and {} are at index {}",
+                    used_pallet, pallet.name, final_index,
+                );
+                let mut err = syn::Error::new(used_pallet.span(), &msg);
+                err.combine(syn::Error::new(pallet.name.span(), msg));
+                return Err(err)
+            }
+
+            if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) {
+                let msg = "Two pallets with the same name!";
+
+                let mut err = syn::Error::new(used_pallet, &msg);
+                err.combine(syn::Error::new(pallet.name.span(), &msg));
+                return Err(err)
+            }
+
+            let mut pallet_parts = pallet.pallet_parts.expect("Checked above");
+
+            let available_parts =
+                pallet_parts.iter().map(|part| part.keyword.name()).collect::<HashSet<_>>();
+
+            // Check parts are correctly specified
+            match &pallet.specified_parts {
+                SpecifiedParts::Exclude(parts) | SpecifiedParts::Use(parts) =>
+                    for part in parts {
+                        if !available_parts.contains(part.keyword.name()) {
+                            let msg = format!(
+                                "Invalid pallet part specified, the pallet `{}` doesn't have the \
+                                `{}` part. Available parts are: {}.",
+                                pallet.name,
+                                part.keyword.name(),
+                                pallet_parts.iter().fold(String::new(), |fold, part| {
+                                    if fold.is_empty() {
+                                        format!("`{}`", part.keyword.name())
+                                    } else {
+                                        format!("{}, `{}`", fold, part.keyword.name())
+                                    }
+                                })
+                            );
+                            return Err(syn::Error::new(part.keyword.span(), msg))
+                        }
+                    },
+                SpecifiedParts::All => (),
+            }
+
+            // Set only specified parts.
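+            // `Exclude` drops the listed parts from the resolved set, `Use` keeps only the
+            // listed parts, and `All` leaves the resolved parts untouched.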
+ match pallet.specified_parts { + SpecifiedParts::Exclude(excluded_parts) => pallet_parts.retain(|part| { + !excluded_parts + .iter() + .any(|excluded_part| excluded_part.keyword.name() == part.keyword.name()) + }), + SpecifiedParts::Use(used_parts) => pallet_parts.retain(|part| { + used_parts.iter().any(|use_part| use_part.keyword.name() == part.keyword.name()) + }), + SpecifiedParts::All => (), + } + + Ok(Pallet { + name: pallet.name, + index: final_index, + path: pallet.path, + instance: pallet.instance, + pallet_parts, + }) + }) + .collect::>>()?; + + Ok(PalletsConversion::Explicit(pallets)) +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 6987fc49b9a8c..d01bbf6ace526 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -26,6 +26,7 @@ mod debug_no_bound; mod default_no_bound; mod dummy_part_checker; mod key_prefix; +mod match_and_insert; mod pallet; mod partial_eq_no_bound; mod storage; @@ -297,52 +298,91 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// /// # Example: /// -/// ```nocompile +/// ```ignore /// construct_runtime!( /// pub enum Runtime where /// Block = Block, /// NodeBlock = node::Block, /// UncheckedExtrinsic = UncheckedExtrinsic /// { -/// System: system::{Pallet, Call, Event, Config} = 0, -/// Test: test::{Pallet, Call} = 1, -/// Test2: test_with_long_module::{Pallet, Event}, +/// System: frame_system::{Pallet, Call, Event, Config} = 0, +/// Test: path::to::test::{Pallet, Call} = 1, /// /// // Pallets with instances -/// Test3_Instance1: test3::::{Pallet, Call, Storage, Event, Config, Origin}, -/// Test3_DefaultInstance: test3::{Pallet, Call, Storage, Event, Config, Origin} = 4, +/// Test2_Instance1: test2::::{Pallet, Call, Storage, Event, Config, Origin}, +/// Test2_DefaultInstance: test2::{Pallet, Call, Storage, Event, Config, Origin} = 4, +/// +/// // Pallets declared with `pallet` attribute macro: no need to define the parts +/// Test3_Instance1: test3::, +/// Test3_DefaultInstance: test3, +/// +/// // with `exclude_parts` keyword some part can be excluded. +/// Test4_Instance1: test4:: exclude_parts { Call, Origin }, +/// Test4_DefaultInstance: test4 exclude_parts { Storage }, +/// +/// // with `use_parts` keyword, a subset of the pallet parts can be specified. +/// Test4_Instance1: test4:: use_parts { Pallet, Call}, +/// Test4_DefaultInstance: test4 use_parts { Pallet }, /// } /// ) /// ``` /// -/// The identifier `System` is the name of the pallet and the lower case identifier `system` is the -/// name of the Rust module/crate for this Substrate pallet. The identifiers between the braces are -/// the pallet parts provided by the pallet. It is important to list these parts here to export -/// them correctly in the metadata or to make the pallet usable in the runtime. +/// Each pallet is declared as such: +/// * `Identifier`: name given to the pallet that uniquely identifies it. 
 ///
-/// We provide support for the following module parts in a pallet:
+/// * `:`: colon separator
 ///
-/// - `Pallet` - Required for all pallets
-/// - `Call` - If the pallet has callable functions
-/// - `Storage` - If the pallet uses storage
-/// - `Event` or `Event<T>` (if the event is generic) - If the pallet emits events
-/// - `Origin` or `Origin<T>` (if the origin is generic) - If the pallet has instanciable origins
-/// - `Config` or `Config<T>` (if the config is generic) - If the pallet builds the genesis storage
-///   with `GenesisConfig`
-/// - `Inherent` - If the pallet provides/can check inherents.
-/// - `ValidateUnsigned` - If the pallet validates unsigned extrinsics.
+/// * `path::to::pallet`: identifiers separated by colons which declare the path to a pallet
+///   definition.
 ///
-/// `= $n` is an optional part allowing to define at which index the pallet variants in
-/// `OriginCaller`, `Call` and `Event` are encoded, and to define the ModuleToIndex value.
+/// * `::<Instance1>` optional: specify the instance of the pallet to use. If not specified it will
+///   use the default instance (or the only instance in case of non-instantiable pallets).
 ///
-/// if `= $n` is not given, then index is resolved same as fieldless enum in Rust
-/// (i.e. incrementedly from previous index):
-/// ```nocompile
-/// pallet1 .. = 2,
-/// pallet2 .., // Here pallet2 is given index 3
-/// pallet3 .. = 0,
-/// pallet4 .., // Here pallet4 is given index 1
-/// ```
+/// * `::{ Part1, Part2, .. }` optional if the pallet is declared with `frame_support::pallet`:
+///   comma-separated parts declared with their generics. If a pallet is declared with the
+///   `frame_support::pallet` macro then the parts can be automatically derived if not explicitly
+///   provided. We provide support for the following module parts in a pallet:
+///
+///   - `Pallet` - Required for all pallets
+///   - `Call` - If the pallet has callable functions
+///   - `Storage` - If the pallet uses storage
+///   - `Event` or `Event<T>` (if the event is generic) - If the pallet emits events
+///   - `Origin` or `Origin<T>` (if the origin is generic) - If the pallet has instantiable origins
+///   - `Config` or `Config<T>` (if the config is generic) - If the pallet builds the genesis
+///     storage with `GenesisConfig`
+///   - `Inherent` - If the pallet provides/can check inherents.
+///   - `ValidateUnsigned` - If the pallet validates unsigned extrinsics.
+///
+///   It is important to list these parts here to export them correctly in the metadata or to make
+///   the pallet usable in the runtime.
+///
+/// * `exclude_parts { Part1, Part2 }` optional: comma-separated parts without generics, i.e. one of
+///   `Pallet`, `Call`, `Storage`, `Event`, `Origin`, `Config`, `Inherent`, `ValidateUnsigned`. It
+///   is incompatible with `use_parts`. This specifies the parts to exclude, in order to select a
+///   subset of the pallet parts.
+///
+///   For example, excluding the part `Call` can be useful if the runtime doesn't want to make the
+///   pallet calls available.
+///
+/// * `use_parts { Part1, Part2 }` optional: comma-separated parts without generics, i.e. one of
+///   `Pallet`, `Call`, `Storage`, `Event`, `Origin`, `Config`, `Inherent`, `ValidateUnsigned`. It
+///   is incompatible with `exclude_parts`. This specifies the parts to use, in order to select a
+///   subset of the pallet parts.
+///
+///   For example, not using the part `Call` can be useful if the runtime doesn't want to make the
+///   pallet calls available.
+///
+/// * `= $n` optional: number to define at which index the pallet variants in `OriginCaller`, `Call`
+///   and `Event` are encoded, and to define the ModuleToIndex value.
+///
+///   If `= $n` is not given, then the index is resolved in the same way as for a fieldless enum in
+///   Rust (i.e. incrementally from the previous index):
+///   ```nocompile
+///   pallet1 .. = 2,
+///   pallet2 .., // Here pallet2 is given index 3
+///   pallet3 .. = 0,
+///   pallet4 .., // Here pallet4 is given index 1
+///   ```
 ///
 /// # Note
 ///
@@ -352,8 +392,8 @@ pub fn decl_storage(input: TokenStream) -> TokenStream {
 ///
 /// # Type definitions
 ///
-/// * The macro generates a type alias for each pallet to their `Module` (or `Pallet`). E.g. `type
-///   System = frame_system::Pallet`
+/// * The macro generates a type alias for each pallet to its `Pallet`. E.g. `type System =
+///   frame_system::Pallet`
 #[proc_macro]
 pub fn construct_runtime(input: TokenStream) -> TokenStream {
     construct_runtime::construct_runtime(input)
@@ -498,3 +538,27 @@ pub fn impl_key_prefix_for_tuples(input: TokenStream) -> TokenStream {
 pub fn __generate_dummy_part_checker(input: TokenStream) -> TokenStream {
     dummy_part_checker::generate_dummy_part_checker(input)
 }
+
+/// Macro that inserts some tokens after the first match of some pattern.
+///
+/// # Example:
+///
+/// ```nocompile
+/// match_and_insert!(
+///     target = [{ Some content with { at some point match pattern } other match patterns are ignored }]
+///     pattern = [{ match pattern }] // the match pattern cannot contain any group: `[]`, `()`, `{}`
+///                                   // can relax this constraint, but will require modifying the match logic in code
+///     tokens = [{ expansion tokens }] // content inside braces can be anything including groups
+/// );
+/// ```
+///
+/// will generate:
+///
+/// ```nocompile
+/// Some content with { at some point match pattern expansion tokens } other match patterns are
+/// ignored
+/// ```
+#[proc_macro]
+pub fn match_and_insert(input: TokenStream) -> TokenStream {
+    match_and_insert::match_and_insert(input)
+}
diff --git a/frame/support/procedural/src/match_and_insert.rs b/frame/support/procedural/src/match_and_insert.rs
new file mode 100644
index 0000000000000..4ffc596e6dca0
--- /dev/null
+++ b/frame/support/procedural/src/match_and_insert.rs
@@ -0,0 +1,159 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Implementation of the `match_and_insert` macro.
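+//!
+//! A small example of the behaviour (mirroring the `nocompile` example on the proc macro
+//! itself):
+//! ```nocompile
+//! match_and_insert!(
+//!     target = [{ foo { bar } }]
+//!     pattern = [{ bar }]
+//!     tokens = [{ baz }]
+//! );
+//! // Expands to: foo { bar baz }
+//! ```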
+ +use proc_macro2::{Group, Span, TokenStream, TokenTree}; +use std::iter::once; +use syn::spanned::Spanned; + +mod keyword { + syn::custom_keyword!(target); + syn::custom_keyword!(pattern); + syn::custom_keyword!(tokens); +} + +pub fn match_and_insert(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let MatchAndInsertDef { pattern, tokens, target } = + syn::parse_macro_input!(input as MatchAndInsertDef); + + match expand_in_stream(&pattern, &mut Some(tokens), target) { + Ok(stream) => stream.into(), + Err(err) => err.to_compile_error().into(), + } +} + +struct MatchAndInsertDef { + // Token stream to search and insert tokens into. + target: TokenStream, + // Pattern to match against, this is ensured to have no TokenTree::Group nor TokenTree::Literal + // (i.e. contains only Punct or Ident), and not being empty. + pattern: Vec, + // Token stream to insert after the match pattern. + tokens: TokenStream, +} + +impl syn::parse::Parse for MatchAndInsertDef { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + let mut target; + let _ = input.parse::()?; + let _ = input.parse::()?; + let _replace_with_bracket: syn::token::Bracket = syn::bracketed!(target in input); + let _replace_with_brace: syn::token::Brace = syn::braced!(target in target); + let target = target.parse()?; + + let mut pattern; + let _ = input.parse::()?; + let _ = input.parse::()?; + let _replace_with_bracket: syn::token::Bracket = syn::bracketed!(pattern in input); + let _replace_with_brace: syn::token::Brace = syn::braced!(pattern in pattern); + let pattern = pattern.parse::()?.into_iter().collect::>(); + + if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Group(_))) { + return Err(syn::Error::new(t.span(), "Unexpected group token tree")) + } + if let Some(t) = pattern.iter().find(|t| matches!(t, TokenTree::Literal(_))) { + return Err(syn::Error::new(t.span(), "Unexpected literal token tree")) + } + + if pattern.is_empty() { + return Err(syn::Error::new(Span::call_site(), "empty match pattern is invalid")) + } + + let mut tokens; + let _ = input.parse::()?; + let _ = input.parse::()?; + let _replace_with_bracket: syn::token::Bracket = syn::bracketed!(tokens in input); + let _replace_with_brace: syn::token::Brace = syn::braced!(tokens in tokens); + let tokens = tokens.parse()?; + + Ok(Self { tokens, pattern, target }) + } +} + +// Insert `tokens` after the first matching `pattern`. +// `tokens` must be some (Option is used for internal simplification). +// `pattern` must not be empty and should only contain Ident or Punct. 
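+// The stream is walked only once: `match_cursor` tracks how many pattern tokens have been
+// matched so far, groups are searched recursively, and as soon as the pattern is fully
+// matched the tokens are spliced in and the rest of the stream is appended unchanged.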
+fn expand_in_stream( + pattern: &[TokenTree], + tokens: &mut Option, + stream: TokenStream, +) -> syn::Result { + assert!( + tokens.is_some(), + "`tokens` must be some, Option is used because `tokens` is used only once" + ); + assert!( + !pattern.is_empty(), + "`pattern` must not be empty, otherwise there is nothing to match against" + ); + + let stream_span = stream.span(); + let mut stream = stream.into_iter(); + let mut extended = TokenStream::new(); + let mut match_cursor = 0; + + while let Some(token) = stream.next() { + match token { + TokenTree::Group(group) => { + match_cursor = 0; + let group_stream = group.stream(); + match expand_in_stream(pattern, tokens, group_stream) { + Ok(s) => { + extended.extend(once(TokenTree::Group(Group::new(group.delimiter(), s)))); + extended.extend(stream); + return Ok(extended) + }, + Err(_) => { + extended.extend(once(TokenTree::Group(group))); + }, + } + }, + other => { + advance_match_cursor(&other, pattern, &mut match_cursor); + + extended.extend(once(other)); + + if match_cursor == pattern.len() { + extended + .extend(once(tokens.take().expect("tokens is used to replace only once"))); + extended.extend(stream); + return Ok(extended) + } + }, + } + } + // if we reach this point, it means the stream is empty and we haven't found a matching pattern + let msg = format!("Cannot find pattern `{:?}` in given token stream", pattern); + Err(syn::Error::new(stream_span, msg)) +} + +fn advance_match_cursor(other: &TokenTree, pattern: &[TokenTree], match_cursor: &mut usize) { + use TokenTree::{Ident, Punct}; + + let does_match_other_pattern = match (other, &pattern[*match_cursor]) { + (Ident(i1), Ident(i2)) => i1 == i2, + (Punct(p1), Punct(p2)) => p1.as_char() == p2.as_char(), + _ => false, + }; + + if does_match_other_pattern { + *match_cursor += 1; + } else { + *match_cursor = 0; + } +} diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 083ad61fc5239..21acd3c0dd32e 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -29,6 +29,7 @@ mod origin; mod pallet_struct; mod storage; mod store_trait; +mod tt_default_parts; mod type_value; mod validate_unsigned; @@ -67,6 +68,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { let type_values = type_value::expand_type_values(&mut def); let origins = origin::expand_origins(&mut def); let validate_unsigned = validate_unsigned::expand_validate_unsigned(&mut def); + let tt_default_parts = tt_default_parts::expand_tt_default_parts(&mut def); if get_doc_literals(&def.item.attrs).is_empty() { def.item.attrs.push(syn::parse_quote!( @@ -96,6 +98,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { #type_values #origins #validate_unsigned + #tt_default_parts ); def.item diff --git a/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs new file mode 100644 index 0000000000000..cfab7982bfdc9 --- /dev/null +++ b/frame/support/procedural/src/pallet/expand/tt_default_parts.rs @@ -0,0 +1,82 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{pallet::Def, COUNTER}; +use syn::spanned::Spanned; + +/// Generate the `tt_default_parts` macro. +pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream { + let count = COUNTER.with(|counter| counter.borrow_mut().inc()); + let default_parts_unique_id = + syn::Ident::new(&format!("__tt_default_parts_{}", count), def.item.span()); + + let call_part = def.call.as_ref().map(|_| quote::quote!(Call,)); + + let storage_part = (!def.storages.is_empty()).then(|| quote::quote!(Storage,)); + + let event_part = def.event.as_ref().map(|event| { + let gen = event.gen_kind.is_generic().then(|| quote::quote!( )); + quote::quote!( Event #gen , ) + }); + + let origin_part = def.origin.as_ref().map(|origin| { + let gen = origin.is_generic.then(|| quote::quote!( )); + quote::quote!( Origin #gen , ) + }); + + let config_part = def.genesis_config.as_ref().map(|genesis_config| { + let gen = genesis_config.gen_kind.is_generic().then(|| quote::quote!( )); + quote::quote!( Config #gen , ) + }); + + let inherent_part = def.inherent.as_ref().map(|_| quote::quote!(Inherent,)); + + let validate_unsigned_part = + def.validate_unsigned.as_ref().map(|_| quote::quote!(ValidateUnsigned,)); + + quote::quote!( + // This macro follows the conventions as laid out by the `tt-call` crate. It does not + // accept any arguments and simply returns the pallet parts, separated by commas, then + // wrapped inside of braces and finally prepended with double colons, to the caller inside + // of a key named `tokens`. + // + // We need to accept a frame_support argument here, because this macro gets expanded on the + // crate that called the `construct_runtime!` macro, and said crate may have renamed + // frame-support, and so we need to pass in the frame-support path that said crate + // recognizes. + #[macro_export] + #[doc(hidden)] + macro_rules! #default_parts_unique_id { + { + $caller:tt + frame_support = [{ $($frame_support:ident)::* }] + } => { + $($frame_support)*::tt_return! 
{ + $caller + tokens = [{ + ::{ + Pallet, #call_part #storage_part #event_part #origin_part #config_part + #inherent_part #validate_unsigned_part + } + }] + } + }; + } + + pub use #default_parts_unique_id as tt_default_parts; + ) +} diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 1b93b5fb5975e..d81300a404c4f 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -50,6 +50,8 @@ pub use sp_runtime::RuntimeDebug; pub use sp_state_machine::BasicExternalities; #[doc(hidden)] pub use sp_std; +#[doc(hidden)] +pub use tt_call::*; #[macro_use] pub mod dispatch; @@ -573,7 +575,7 @@ pub fn debug(data: &impl sp_std::fmt::Debug) { #[doc(inline)] pub use frame_support_procedural::{ - construct_runtime, decl_storage, transactional, RuntimeDebugNoBound, + construct_runtime, decl_storage, match_and_insert, transactional, RuntimeDebugNoBound, }; #[doc(hidden)] diff --git a/frame/support/test/compile_pass/Cargo.toml b/frame/support/test/compile_pass/Cargo.toml new file mode 100644 index 0000000000000..bca833200d444 --- /dev/null +++ b/frame/support/test/compile_pass/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "frame-support-test-compile-pass" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/runtime" } +sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/version" } +support = { package = "frame-support", version = "4.0.0-dev", default-features = false, path = "../../" } +system = { package = "frame-system", version = "4.0.0-dev", default-features = false, path = "../../../system" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "support/std", + "system/std", +] diff --git a/frame/support/test/compile_pass/src/lib.rs b/frame/support/test/compile_pass/src/lib.rs new file mode 100644 index 0000000000000..17ba40574adf7 --- /dev/null +++ b/frame/support/test/compile_pass/src/lib.rs @@ -0,0 +1,92 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] +//! 
This crate tests that `construct_runtime!` expands the pallet parts +//! correctly even when frame-support is renamed in Cargo.toml + +use sp_core::{sr25519, H256}; +use sp_runtime::{ + create_runtime_str, generic, + traits::{BlakeTwo256, IdentityLookup, Verify}, +}; +use sp_version::RuntimeVersion; +use support::{construct_runtime, parameter_types}; + +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("frame-support-test-compile-pass"), + impl_name: create_runtime_str!("substrate-frame-support-test-compile-pass-runtime"), + authoring_version: 0, + spec_version: 0, + impl_version: 0, + apis: sp_version::create_apis_vec!([]), + transaction_version: 0, +}; + +pub type Signature = sr25519::Signature; +pub type AccountId = ::Signer; +pub type BlockNumber = u64; +pub type Index = u64; + +parameter_types! { + pub const BlockHashCount: BlockNumber = 2400; + pub const Version: RuntimeVersion = VERSION; + pub const SS58Prefix: u8 = 0; +} + +impl system::Config for Runtime { + type BaseCallFilter = support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type Index = u128; + type Hash = H256; + type Hashing = BlakeTwo256; + type Header = Header; + type Lookup = IdentityLookup; + type BlockHashCount = BlockHashCount; + type Version = Version; + type AccountData = (); + type Origin = Origin; + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type Event = Event; + type PalletInfo = PalletInfo; + type Call = Call; + type DbWeight = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type OnSetCode = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; +} + +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system, + } +); diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs new file mode 100644 index 0000000000000..98cd1f197f619 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs @@ -0,0 +1,33 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr new file mode 100644 index 0000000000000..608d57d6a97fc --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -0,0 +1,28 @@ +error: Unexpected tokens, expected one of `=`, `,` + --> $DIR/both_use_and_excluded_parts.rs:29:43 + | +29 | Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, + | ^^^^^^^^^ + +error[E0412]: cannot find type `Call` in this scope + --> $DIR/both_use_and_excluded_parts.rs:18:64 + | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; + | + +error[E0412]: cannot find type `Runtime` in this scope + --> $DIR/both_use_and_excluded_parts.rs:20:25 + | +20 | impl pallet::Config for Runtime {} + | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.rs b/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.rs new file mode 100644 index 0000000000000..6d21c2a6e170a --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.rs @@ -0,0 +1,13 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: frame_system exclude_parts { Call, Call }, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.stderr b/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.stderr new file mode 100644 index 0000000000000..75de56076528b --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/duplicate_exclude.stderr @@ -0,0 +1,5 @@ +error: `Call` was already declared before. Please remove the duplicate declaration + --> $DIR/duplicate_exclude.rs:9:46 + | +9 | System: frame_system exclude_parts { Call, Call }, + | ^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_missspell.rs b/frame/support/test/tests/construct_runtime_ui/exclude_missspell.rs new file mode 100644 index 0000000000000..16cbf1e82cf89 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/exclude_missspell.rs @@ -0,0 +1,13 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: frame_system exclude_part { Call }, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_missspell.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_missspell.stderr new file mode 100644 index 0000000000000..82e6aa6c8e308 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/exclude_missspell.stderr @@ -0,0 +1,5 @@ +error: Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,` + --> $DIR/exclude_missspell.rs:9:24 + | +9 | System: frame_system exclude_part { Call }, + | ^^^^^^^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs new file mode 100644 index 0000000000000..51be7e30bd3eb --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs @@ -0,0 +1,38 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + type Foo = StorageValue; +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet exclude_parts { Call }, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr new file mode 100644 index 0000000000000..4e31cfb75c074 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -0,0 +1,28 @@ +error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. 
+ --> $DIR/exclude_undefined_part.rs:34:34 + | +34 | Pallet: pallet exclude_parts { Call }, + | ^^^^ + +error[E0412]: cannot find type `Call` in this scope + --> $DIR/exclude_undefined_part.rs:23:64 + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; + | + +error[E0412]: cannot find type `Runtime` in this scope + --> $DIR/exclude_undefined_part.rs:25:25 + | +25 | impl pallet::Config for Runtime {} + | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr index 50505b9130cbe..db96b8749ca11 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_module_details.stderr @@ -1,5 +1,5 @@ -error: expected one of: identifier, curly braces, `<` - --> $DIR/invalid_module_details.rs:9:19 +error: Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,` + --> $DIR/invalid_module_details.rs:9:17 | 9 | system: System::(), - | ^^ + | ^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr index 3b967f96d7b4e..6025de82bd206 100644 --- a/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/invalid_token_after_module.stderr @@ -1,4 +1,4 @@ -error: expected `::` +error: Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,` --> $DIR/invalid_token_after_module.rs:9:18 | 9 | system: System ? diff --git a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs new file mode 100644 index 0000000000000..706d444f23590 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs @@ -0,0 +1,26 @@ +use frame_support::construct_runtime; + +mod pallet_old { + pub trait Config: frame_system::Config {} + + decl_storage! { + trait Store for Module as Example {} + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + +} +construct_runtime! { + pub enum Runtime where + UncheckedExtrinsic = UncheckedExtrinsic, + Block = Block, + NodeBlock = Block, + { + System: frame_system, + OldPallet: pallet_old, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr new file mode 100644 index 0000000000000..f8ec07e00106f --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.stderr @@ -0,0 +1,31 @@ +error[E0433]: failed to resolve: could not find `tt_default_parts` in `pallet_old` + --> $DIR/old_unsupported_pallet_decl.rs:15:1 + | +15 | / construct_runtime! { +16 | | pub enum Runtime where +17 | | UncheckedExtrinsic = UncheckedExtrinsic, +18 | | Block = Block, +... 
| +23 | | } +24 | | } + | |_^ could not find `tt_default_parts` in `pallet_old` + | + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot find macro `decl_storage` in this scope + --> $DIR/old_unsupported_pallet_decl.rs:6:2 + | +6 | decl_storage! { + | ^^^^^^^^^^^^ + | + = note: consider importing this macro: + frame_support::decl_storage + +error: cannot find macro `decl_module` in this scope + --> $DIR/old_unsupported_pallet_decl.rs:10:2 + | +10 | decl_module! { + | ^^^^^^^^^^^ + | + = note: consider importing this macro: + frame_support::decl_module diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs new file mode 100644 index 0000000000000..1664dcc42b755 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs @@ -0,0 +1,38 @@ +use frame_support::construct_runtime; +use sp_runtime::{generic, traits::BlakeTwo256}; +use sp_core::sr25519; + +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + type Foo = StorageValue; +} + +pub type Signature = sr25519::Signature; +pub type BlockNumber = u64; +pub type Header = generic::Header; +pub type Block = generic::Block; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + +impl pallet::Config for Runtime {} + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet, Call, Storage, Config, Event}, + Pallet: pallet use_parts { Call }, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr new file mode 100644 index 0000000000000..ed41f0ce673a4 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -0,0 +1,28 @@ +error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. 
+ --> $DIR/use_undefined_part.rs:34:30 + | +34 | Pallet: pallet use_parts { Call }, + | ^^^^ + +error[E0412]: cannot find type `Call` in this scope + --> $DIR/use_undefined_part.rs:23:64 + | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | ^^^^ not found in this scope + | +help: consider importing one of these items + | +1 | use crate::pallet::Call; + | +1 | use frame_support_test::Call; + | +1 | use frame_system::Call; + | +1 | use test_pallet::Call; + | + +error[E0412]: cannot find type `Runtime` in this scope + --> $DIR/use_undefined_part.rs:25:25 + | +25 | impl pallet::Config for Runtime {} + | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index dc72be3ebdd49..a314f576187dc 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -509,6 +509,18 @@ pub mod pallet3 { pub struct Pallet(_); } +#[frame_support::pallet] +pub mod pallet4 { + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet {} +} + frame_support::parameter_types!( pub const MyGetParam: u32 = 10; pub const MyGetParam2: u32 = 11; @@ -553,6 +565,8 @@ impl pallet2::Config for Runtime { type Event = Event; } +impl pallet4::Config for Runtime {} + pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -563,12 +577,21 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Call, Event}, - Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, - Example2: pallet2::{Pallet, Call, Event, Config, Storage}, + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system exclude_parts { Pallet, Storage }, + Example: pallet, + Example2: pallet2 exclude_parts { Call }, + Example4: pallet4 use_parts { Call }, } ); +// Test that the part `Call` is excluded from Example2 and included in Example4. +fn _ensure_call_is_correctly_excluded_and_included(call: Call) { + match call { + Call::System(_) | Call::Example(_) | Call::Example4(_) => (), + } +} + #[test] fn transactional_works() { TestExternalities::default().execute_with(|| { @@ -995,8 +1018,8 @@ fn migrate_from_pallet_version_to_storage_version() { AllPalletsWithSystem, >(&db_weight); - // 3 pallets, 2 writes and every write costs 5 weight. - assert_eq!(3 * 2 * 5, weight); + // 4 pallets, 2 writes and every write costs 5 weight. 
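+        // I.e. 4 pallets * 2 writes * 5 weight per write = 40 weight in total.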
+ assert_eq!(4 * 2 * 5, weight); // All pallet versions should be removed assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); @@ -1268,7 +1291,7 @@ fn metadata() { }, ], }), - calls: Some(meta_type::>().into()), + calls: None, event: Some(PalletEventMetadata { ty: meta_type::() }), constants: vec![], error: None, diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 3a1009402d6f2..c031ac9fe1bf5 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -302,13 +302,12 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Pallet, Call, Event}, - Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, - Instance1Example: pallet::::{ - Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned - }, - Example2: pallet2::{Pallet, Event, Config, Storage}, - Instance1Example2: pallet2::::{Pallet, Event, Config, Storage}, + // Exclude part `Storage` in order not to check its metadata in tests. + System: frame_system exclude_parts { Storage }, + Example: pallet, + Instance1Example: pallet::, + Example2: pallet2, + Instance1Example2: pallet2::, } ); @@ -601,7 +600,7 @@ fn metadata() { let system_pallet_metadata = PalletMetadata { index: 0, name: "System", - storage: None, + storage: None, // The storage metadatas have been excluded. calls: Some(scale_info::meta_type::>().into()), event: Some(PalletEventMetadata { ty: scale_info::meta_type::>(), From b63e1c525baa33097ceccbd2ca828acbd36b0262 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 31 Oct 2021 19:13:19 +0100 Subject: [PATCH 028/162] Offchain-worker: Make it possible to disable http support (#10087) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Offchain-worker: Make it possible to disable http support If a chain doesn't require http support in its offchain workers, this pr enables them to disable the http support. 
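As a rough sketch (assuming a node that holds its client handle in `client`),
disabling http support then looks like this:

    let offchain_workers = sc_offchain::OffchainWorkers::new_with_options(
        client.clone(),
        sc_offchain::OffchainWorkerOptions { enable_http_requests: false },
    );

The offchain workers of such a node run without the `HTTP` capability and, as
documented on `OffchainWorkerOptions`, any http request made by them will panic.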
* Switch to bitflags * Use Capabilities * Update client/offchain/src/lib.rs Co-authored-by: Tomasz Drwięga * Fix test * Update client/offchain/src/lib.rs Co-authored-by: Tomasz Drwięga Co-authored-by: Tomasz Drwięga --- Cargo.lock | 2 + client/api/src/execution_extensions.rs | 10 +- client/offchain/Cargo.toml | 1 + client/offchain/src/api.rs | 12 +-- client/offchain/src/api/http.rs | 49 +++++++++- client/offchain/src/lib.rs | 43 ++++++--- primitives/core/Cargo.toml | 1 + primitives/core/src/lib.rs | 12 +-- primitives/core/src/offchain/mod.rs | 128 ++++++++++--------------- 9 files changed, 144 insertions(+), 114 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 84ad0c0a563c0..666fe6e451831 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8184,6 +8184,7 @@ dependencies = [ "lazy_static", "log 0.4.14", "num_cpus", + "once_cell", "parity-scale-codec", "parking_lot 0.11.1", "rand 0.7.3", @@ -9232,6 +9233,7 @@ name = "sp-core" version = "4.0.0-dev" dependencies = [ "base58", + "bitflags", "blake2-rfc", "byteorder", "criterion", diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index ec44294b8a96c..56e70cc2b6a62 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -161,13 +161,13 @@ impl ExecutionExtensions { let mut extensions = self.extensions_factory.read().extensions_for(capabilities); - if capabilities.has(offchain::Capability::Keystore) { + if capabilities.contains(offchain::Capabilities::KEYSTORE) { if let Some(ref keystore) = self.keystore { extensions.register(KeystoreExt(keystore.clone())); } } - if capabilities.has(offchain::Capability::TransactionPool) { + if capabilities.contains(offchain::Capabilities::TRANSACTION_POOL) { if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) { extensions .register(TransactionPoolExt( @@ -176,8 +176,8 @@ impl ExecutionExtensions { } } - if capabilities.has(offchain::Capability::OffchainDbRead) || - capabilities.has(offchain::Capability::OffchainDbWrite) + if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) || + capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) { if let Some(offchain_db) = self.offchain_db.as_ref() { extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( @@ -210,7 +210,7 @@ impl ExecutionExtensions { ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), ExecutionContext::Syncing => self.strategies.syncing.get_manager(), ExecutionContext::Importing => self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.is_all() => self.strategies.offchain_worker.get_manager(), ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), }; diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 146ce07e1303b..104a0e61f3180 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -33,6 +33,7 @@ sc-utils = { version = "4.0.0-dev", path = "../utils" } threadpool = "1.7" hyper = "0.14.11" hyper-rustls = "0.22.1" +once_cell = "1.8" [dev-dependencies] sc-client-db = { version = "0.10.0-dev", default-features = true, path = "../db" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index b2276a852372f..07136d1815b91 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -298,9 +298,9 @@ impl AsyncApi { pub fn new( 
network_provider: Arc, is_validator: bool, - shared_client: SharedClient, + shared_http_client: SharedClient, ) -> (Api, Self) { - let (http_api, http_worker) = http::http(shared_client); + let (http_api, http_worker) = http::http(shared_http_client); let api = Api { network_provider, is_validator, http: http_api }; @@ -310,10 +310,8 @@ impl AsyncApi { } /// Run a processing task for the API - pub fn process(mut self) -> impl Future { - let http = self.http.take().expect("Take invoked only once."); - - http + pub fn process(self) -> impl Future { + self.http.expect("`process` is only called once; qed") } } @@ -328,7 +326,7 @@ mod tests { time::SystemTime, }; - struct TestNetwork(); + pub(super) struct TestNetwork(); impl NetworkProvider for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 31f7d60e34ff9..a2975bad16528 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -34,6 +34,7 @@ use futures::{channel::mpsc, future, prelude::*}; use hyper::{client, Body, Client as HyperClient}; use hyper_rustls::HttpsConnector; use log::error; +use once_cell::sync::Lazy; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; use std::{ @@ -47,11 +48,13 @@ use std::{ /// Wrapper struct used for keeping the hyper_rustls client running. #[derive(Clone)] -pub struct SharedClient(Arc, Body>>); +pub struct SharedClient(Arc, Body>>>); impl SharedClient { pub fn new() -> Self { - Self(Arc::new(HyperClient::builder().build(HttpsConnector::with_native_roots()))) + Self(Arc::new(Lazy::new(|| { + HyperClient::builder().build(HttpsConnector::with_native_roots()) + }))) } } @@ -567,7 +570,7 @@ pub struct HttpWorker { /// Used to receive messages from the `HttpApi`. from_api: TracingUnboundedReceiver, /// The engine that runs HTTP requests. - http_client: Arc, Body>>, + http_client: Arc, Body>>>, /// HTTP requests that are being worked on by the engine. requests: Vec<(HttpRequestId, HttpWorkerRequest)>, } @@ -697,12 +700,15 @@ impl fmt::Debug for HttpWorkerRequest { #[cfg(test)] mod tests { - use super::{http, SharedClient}; + use super::{ + super::{tests::TestNetwork, AsyncApi}, + *, + }; use crate::api::timestamp; use core::convert::Infallible; use futures::{future, StreamExt}; use lazy_static::lazy_static; - use sp_core::offchain::{Duration, HttpError, HttpRequestId, HttpRequestStatus}; + use sp_core::offchain::{Duration, Externalities, HttpError, HttpRequestId, HttpRequestStatus}; // Using lazy_static to avoid spawning lots of different SharedClients, // as spawning a SharedClient is CPU-intensive and opens lots of fds. @@ -1006,4 +1012,37 @@ mod tests { } } } + + #[test] + fn shared_http_client_is_only_initialized_on_access() { + let shared_client = SharedClient::new(); + + { + let mock = Arc::new(TestNetwork()); + let (mut api, async_api) = AsyncApi::new(mock, false, shared_client.clone()); + api.timestamp(); + + futures::executor::block_on(async move { + assert!(futures::poll!(async_api.process()).is_pending()); + }); + } + + // Check that the http client wasn't initialized, because it wasn't used. 
+ assert!(Lazy::into_value(Arc::try_unwrap(shared_client.0).unwrap()).is_err()); + + let shared_client = SharedClient::new(); + + { + let mock = Arc::new(TestNetwork()); + let (mut api, async_api) = AsyncApi::new(mock, false, shared_client.clone()); + let id = api.http_request_start("lol", "nope", &[]).unwrap(); + api.http_request_write_body(id, &[], None).unwrap(); + futures::executor::block_on(async move { + assert!(futures::poll!(async_api.process()).is_pending()); + }); + } + + // Check that the http client initialized, because it was used. + assert!(Lazy::into_value(Arc::try_unwrap(shared_client.0).unwrap()).is_ok()); + } } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index be6e4238ca5f1..a77fd17a2c8b8 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -81,18 +81,31 @@ where } } +/// Options for [`OffchainWorkers`] +pub struct OffchainWorkerOptions { + /// Enable http requests from offchain workers? + /// + /// If not enabled, any http request will panic. + pub enable_http_requests: bool, +} + /// An offchain workers manager. pub struct OffchainWorkers { client: Arc, _block: PhantomData, thread_pool: Mutex, - shared_client: api::SharedClient, + shared_http_client: api::SharedClient, + enable_http: bool, } impl OffchainWorkers { - /// Creates new `OffchainWorkers`. + /// Creates new [`OffchainWorkers`]. pub fn new(client: Arc) -> Self { - let shared_client = api::SharedClient::new(); + Self::new_with_options(client, OffchainWorkerOptions { enable_http_requests: true }) + } + + /// Creates new [`OffchainWorkers`] using the given `options`. + pub fn new_with_options(client: Arc, options: OffchainWorkerOptions) -> Self { Self { client, _block: PhantomData, @@ -100,7 +113,8 @@ impl OffchainWorkers { "offchain-worker".into(), num_cpus::get(), )), - shared_client, + shared_http_client: api::SharedClient::new(), + enable_http: options.enable_http_requests, } } } @@ -140,18 +154,22 @@ where }, }; debug!("Checking offchain workers at {:?}: version:{}", at, version); - if version > 0 { + let process = (version > 0).then(|| { let (api, runner) = - api::AsyncApi::new(network_provider, is_validator, self.shared_client.clone()); + api::AsyncApi::new(network_provider, is_validator, self.shared_http_client.clone()); debug!("Spawning offchain workers at {:?}", at); let header = header.clone(); let client = self.client.clone(); + + let mut capabilities = offchain::Capabilities::all(); + + capabilities.set(offchain::Capabilities::HTTP, self.enable_http); self.spawn_worker(move || { let runtime = client.runtime_api(); let api = Box::new(api); debug!("Running offchain workers at {:?}", at); - let context = - ExecutionContext::OffchainCall(Some((api, offchain::Capabilities::all()))); + + let context = ExecutionContext::OffchainCall(Some((api, capabilities))); let run = if version == 2 { runtime.offchain_worker_with_context(&at, context, &header) } else { @@ -166,9 +184,12 @@ where log::error!("Error running offchain workers at {:?}: {:?}", at, e); } }); - futures::future::Either::Left(runner.process()) - } else { - futures::future::Either::Right(futures::future::ready(())) + + runner.process() + }); + + async move { + futures::future::OptionFuture::from(process).await; } } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index dd721d744f573..14a76e2482444 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -49,6 +49,7 @@ parity-util-mem = { version = "0.10.0", default-features = false, features = [ futures = { 
version = "0.3.1", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.21", optional = true } +bitflags = "1.3" # full crypto ed25519-dalek = { version = "1.0.1", default-features = false, features = [ diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index a6229fe43a1a5..fd752397cd9a9 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -118,15 +118,13 @@ impl ExecutionContext { use ExecutionContext::*; match self { - Importing | Syncing | BlockConstruction => offchain::Capabilities::none(), + Importing | Syncing | BlockConstruction => offchain::Capabilities::empty(), // Enable keystore, transaction pool and Offchain DB reads by default for offchain // calls. - OffchainCall(None) => [ - offchain::Capability::Keystore, - offchain::Capability::OffchainDbRead, - offchain::Capability::TransactionPool, - ][..] - .into(), + OffchainCall(None) => + offchain::Capabilities::KEYSTORE | + offchain::Capabilities::OFFCHAIN_DB_READ | + offchain::Capabilities::TRANSACTION_POOL, OffchainCall(Some((_, capabilities))) => *capabilities, } } diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index 640f4d2583b79..dfe23c1ff8f18 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -258,65 +258,35 @@ impl Timestamp { } } -/// Execution context extra capabilities. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -#[repr(u8)] -pub enum Capability { - /// Access to transaction pool. - TransactionPool = 1, - /// External http calls. - Http = 2, - /// Keystore access. - Keystore = 4, - /// Randomness source. - Randomness = 8, - /// Access to opaque network state. - NetworkState = 16, - /// Access to offchain worker DB (read only). - OffchainDbRead = 32, - /// Access to offchain worker DB (writes). - OffchainDbWrite = 64, - /// Manage the authorized nodes - NodeAuthorization = 128, -} - -/// A set of capabilities -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct Capabilities(u8); - -impl Capabilities { - /// Return an object representing an empty set of capabilities. - pub fn none() -> Self { - Self(0) - } - - /// Return an object representing all capabilities enabled. - pub fn all() -> Self { - Self(u8::MAX) +bitflags::bitflags! { + /// Execution context extra capabilities. + pub struct Capabilities: u32 { + /// Access to transaction pool. + const TRANSACTION_POOL = 0b0000_0001; + /// External http calls. + const HTTP = 0b0000_0010; + /// Keystore access. + const KEYSTORE = 0b0000_0100; + /// Randomness source. + const RANDOMNESS = 0b0000_1000; + /// Access to opaque network state. + const NETWORK_STATE = 0b0001_0000; + /// Access to offchain worker DB (read only). + const OFFCHAIN_DB_READ = 0b0010_0000; + /// Access to offchain worker DB (writes). + const OFFCHAIN_DB_WRITE = 0b0100_0000; + /// Manage the authorized nodes + const NODE_AUTHORIZATION = 0b1000_0000; } +} +impl Capabilities { /// Return capabilities for rich offchain calls. /// /// Those calls should be allowed to sign and submit transactions /// and access offchain workers database (but read only!). pub fn rich_offchain_call() -> Self { - [Capability::TransactionPool, Capability::Keystore, Capability::OffchainDbRead][..].into() - } - - /// Check if particular capability is enabled. - pub fn has(&self, capability: Capability) -> bool { - self.0 & capability as u8 != 0 - } - - /// Check if this capability object represents all capabilities. 
- pub fn has_all(&self) -> bool { - self == &Capabilities::all() - } -} - -impl<'a> From<&'a [Capability]> for Capabilities { - fn from(list: &'a [Capability]) -> Self { - Capabilities(list.iter().fold(0_u8, |a, b| a | *b as u8)) + Capabilities::TRANSACTION_POOL | Capabilities::KEYSTORE | Capabilities::OFFCHAIN_DB_READ } } @@ -552,8 +522,8 @@ impl LimitedExternalities { /// Check if given capability is allowed. /// /// Panics in case it is not. - fn check(&self, capability: Capability, name: &'static str) { - if !self.capabilities.has(capability) { + fn check(&self, capability: Capabilities, name: &'static str) { + if !self.capabilities.contains(capability) { panic!("Accessing a forbidden API: {}. No: {:?} capability.", name, capability); } } @@ -561,27 +531,27 @@ impl LimitedExternalities { impl Externalities for LimitedExternalities { fn is_validator(&self) -> bool { - self.check(Capability::Keystore, "is_validator"); + self.check(Capabilities::KEYSTORE, "is_validator"); self.externalities.is_validator() } fn network_state(&self) -> Result { - self.check(Capability::NetworkState, "network_state"); + self.check(Capabilities::NETWORK_STATE, "network_state"); self.externalities.network_state() } fn timestamp(&mut self) -> Timestamp { - self.check(Capability::Http, "timestamp"); + self.check(Capabilities::HTTP, "timestamp"); self.externalities.timestamp() } fn sleep_until(&mut self, deadline: Timestamp) { - self.check(Capability::Http, "sleep_until"); + self.check(Capabilities::HTTP, "sleep_until"); self.externalities.sleep_until(deadline) } fn random_seed(&mut self) -> [u8; 32] { - self.check(Capability::Randomness, "random_seed"); + self.check(Capabilities::RANDOMNESS, "random_seed"); self.externalities.random_seed() } @@ -591,7 +561,7 @@ impl Externalities for LimitedExternalities { uri: &str, meta: &[u8], ) -> Result { - self.check(Capability::Http, "http_request_start"); + self.check(Capabilities::HTTP, "http_request_start"); self.externalities.http_request_start(method, uri, meta) } @@ -601,7 +571,7 @@ impl Externalities for LimitedExternalities { name: &str, value: &str, ) -> Result<(), ()> { - self.check(Capability::Http, "http_request_add_header"); + self.check(Capabilities::HTTP, "http_request_add_header"); self.externalities.http_request_add_header(request_id, name, value) } @@ -611,7 +581,7 @@ impl Externalities for LimitedExternalities { chunk: &[u8], deadline: Option, ) -> Result<(), HttpError> { - self.check(Capability::Http, "http_request_write_body"); + self.check(Capabilities::HTTP, "http_request_write_body"); self.externalities.http_request_write_body(request_id, chunk, deadline) } @@ -620,12 +590,12 @@ impl Externalities for LimitedExternalities { ids: &[HttpRequestId], deadline: Option, ) -> Vec { - self.check(Capability::Http, "http_response_wait"); + self.check(Capabilities::HTTP, "http_response_wait"); self.externalities.http_response_wait(ids, deadline) } fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - self.check(Capability::Http, "http_response_headers"); + self.check(Capabilities::HTTP, "http_response_headers"); self.externalities.http_response_headers(request_id) } @@ -635,12 +605,12 @@ impl Externalities for LimitedExternalities { buffer: &mut [u8], deadline: Option, ) -> Result { - self.check(Capability::Http, "http_response_read_body"); + self.check(Capabilities::HTTP, "http_response_read_body"); self.externalities.http_response_read_body(request_id, buffer, deadline) } fn set_authorized_nodes(&mut self, nodes: Vec, 
authorized_only: bool) { - self.check(Capability::NodeAuthorization, "set_authorized_nodes"); + self.check(Capabilities::NODE_AUTHORIZATION, "set_authorized_nodes"); self.externalities.set_authorized_nodes(nodes, authorized_only) } } @@ -724,12 +694,12 @@ impl DbExternalities for Box { impl DbExternalities for LimitedExternalities { fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - self.check(Capability::OffchainDbWrite, "local_storage_set"); + self.check(Capabilities::OFFCHAIN_DB_WRITE, "local_storage_set"); self.externalities.local_storage_set(kind, key, value) } fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - self.check(Capability::OffchainDbWrite, "local_storage_clear"); + self.check(Capabilities::OFFCHAIN_DB_WRITE, "local_storage_clear"); self.externalities.local_storage_clear(kind, key) } @@ -740,13 +710,13 @@ impl DbExternalities for LimitedExternalities { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { - self.check(Capability::OffchainDbWrite, "local_storage_compare_and_set"); + self.check(Capabilities::OFFCHAIN_DB_WRITE, "local_storage_compare_and_set"); self.externalities .local_storage_compare_and_set(kind, key, old_value, new_value) } fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - self.check(Capability::OffchainDbRead, "local_storage_get"); + self.check(Capabilities::OFFCHAIN_DB_READ, "local_storage_get"); self.externalities.local_storage_get(kind, key) } } @@ -815,15 +785,15 @@ mod tests { #[test] fn capabilities() { - let none = Capabilities::none(); + let none = Capabilities::empty(); let all = Capabilities::all(); - let some = Capabilities::from(&[Capability::Keystore, Capability::Randomness][..]); - - assert!(!none.has(Capability::Keystore)); - assert!(all.has(Capability::Keystore)); - assert!(some.has(Capability::Keystore)); - assert!(!none.has(Capability::TransactionPool)); - assert!(all.has(Capability::TransactionPool)); - assert!(!some.has(Capability::TransactionPool)); + let some = Capabilities::KEYSTORE | Capabilities::RANDOMNESS; + + assert!(!none.contains(Capabilities::KEYSTORE)); + assert!(all.contains(Capabilities::KEYSTORE)); + assert!(some.contains(Capabilities::KEYSTORE)); + assert!(!none.contains(Capabilities::TRANSACTION_POOL)); + assert!(all.contains(Capabilities::TRANSACTION_POOL)); + assert!(!some.contains(Capabilities::TRANSACTION_POOL)); } } From 26d69bcbe26f6b463e9374e1b1c54c3067fb6131 Mon Sep 17 00:00:00 2001 From: Zeke Mostov <32168567+emostov@users.noreply.github.com> Date: Sun, 31 Oct 2021 22:10:13 +0100 Subject: [PATCH 029/162] Fuzzer for Pallet Bags List (#9851) * Fuzzer for Pallet Bags List * Some small updates * Fuzzer for Pallet Bags List This PR adds a fuzzer for the `SortedListProvider` API exposed by pallet-bags-list. * Feature gate code NOT used by fuzz feature * Create Enum for list actions * fix some small mistakes * try and make CI happy * fmt * Do not insert before updating * clean up some misc. 
comments * marginally improve Node::sanity_check * Change ID_RANGE to 25_000 * comma * try improve correct feature gating so no unused code Co-authored-by: thiolliere --- Cargo.lock | 10 ++++ Cargo.toml | 1 + frame/bags-list/Cargo.toml | 6 ++ frame/bags-list/fuzzer/.gitignore | 2 + frame/bags-list/fuzzer/Cargo.toml | 22 ++++++++ frame/bags-list/fuzzer/src/main.rs | 88 ++++++++++++++++++++++++++++++ frame/bags-list/src/lib.rs | 4 +- frame/bags-list/src/list/mod.rs | 14 +++-- frame/bags-list/src/mock.rs | 7 ++- 9 files changed, 144 insertions(+), 10 deletions(-) create mode 100644 frame/bags-list/fuzzer/.gitignore create mode 100644 frame/bags-list/fuzzer/Cargo.toml create mode 100644 frame/bags-list/fuzzer/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 666fe6e451831..3434e01d13cd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5180,6 +5180,16 @@ dependencies = [ "sp-tracing", ] +[[package]] +name = "pallet-bags-list-fuzzer" +version = "4.0.0-dev" +dependencies = [ + "frame-election-provider-support", + "honggfuzz", + "pallet-bags-list", + "rand 0.8.4", +] + [[package]] name = "pallet-balances" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 4a228203159eb..743e0f7066647 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/utility", "frame/vesting", "frame/bags-list", + "frame/bags-list/fuzzer", "primitives/api", "primitives/api/proc-macro", "primitives/api/test", diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index cd06ce4a69983..372dc87e212e2 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -63,4 +63,10 @@ runtime-benchmarks = [ "sp-tracing", "frame-election-provider-support/runtime-benchmarks", ] +fuzz = [ + "sp-core", + "sp-io", + "pallet-balances", + "sp-tracing", +] diff --git a/frame/bags-list/fuzzer/.gitignore b/frame/bags-list/fuzzer/.gitignore new file mode 100644 index 0000000000000..3ebcb104d4a50 --- /dev/null +++ b/frame/bags-list/fuzzer/.gitignore @@ -0,0 +1,2 @@ +hfuzz_target +hfuzz_workspace diff --git a/frame/bags-list/fuzzer/Cargo.toml b/frame/bags-list/fuzzer/Cargo.toml new file mode 100644 index 0000000000000..171e0e7af70cd --- /dev/null +++ b/frame/bags-list/fuzzer/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "pallet-bags-list-fuzzer" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Fuzzer for FRAME pallet bags list" +readme = "README.md" +publish = false + +[dependencies] +honggfuzz = "0.5" +rand = { version = "0.8", features = ["std", "small_rng"] } + +pallet-bags-list = { version = "4.0.0-dev", features = ["fuzz"], path = ".." } +frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support", features = ["runtime-benchmarks"] } + +[[bin]] +name = "bags-list" +path = "src/main.rs" diff --git a/frame/bags-list/fuzzer/src/main.rs b/frame/bags-list/fuzzer/src/main.rs new file mode 100644 index 0000000000000..02a2003b9a71f --- /dev/null +++ b/frame/bags-list/fuzzer/src/main.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run bags-list`. `honggfuzz` CLI options can +//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug fixed_point hfuzz_workspace/bags_list/*.fuzz`. +//! +//! # More information +//! More information about `honggfuzz` can be found +//! [here](https://docs.rs/honggfuzz/). + +use frame_election_provider_support::{SortedListProvider, VoteWeight}; +use honggfuzz::fuzz; +use pallet_bags_list::mock::{AccountId, BagsList, ExtBuilder}; +use std::convert::From; + +const ID_RANGE: AccountId = 25_000; + +/// Actions of a `SortedListProvider` that we fuzz. +enum Action { + Insert, + Update, + Remove, +} + +impl From for Action { + fn from(v: u32) -> Self { + let num_variants = Self::Remove as u32 + 1; + match v % num_variants { + _x if _x == Action::Insert as u32 => Action::Insert, + _x if _x == Action::Update as u32 => Action::Update, + _x if _x == Action::Remove as u32 => Action::Remove, + _ => unreachable!(), + } + } +} + +fn main() { + ExtBuilder::default().build_and_execute(|| loop { + fuzz!(|data: (AccountId, VoteWeight, u32)| { + let (account_id_seed, vote_weight, action_seed) = data; + + let id = account_id_seed % ID_RANGE; + let action = Action::from(action_seed); + + match action { + Action::Insert => { + if BagsList::on_insert(id.clone(), vote_weight).is_err() { + // this was a duplicate id, which is ok. We can just update it. + BagsList::on_update(&id, vote_weight); + } + assert!(BagsList::contains(&id)); + }, + Action::Update => { + let already_contains = BagsList::contains(&id); + BagsList::on_update(&id, vote_weight); + if already_contains { + assert!(BagsList::contains(&id)); + } + }, + Action::Remove => { + BagsList::on_remove(&id); + assert!(!BagsList::contains(&id)); + }, + } + + assert!(BagsList::sanity_check().is_ok()); + }) + }); +} diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 4202a4d499895..10a692e8b3f95 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -59,8 +59,8 @@ use sp_std::prelude::*; mod benchmarks; mod list; -#[cfg(test)] -mod mock; +#[cfg(any(test, feature = "fuzz"))] +pub mod mock; #[cfg(test)] mod tests; pub mod weights; diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 3f55f22271910..057565e645f90 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -391,8 +391,8 @@ impl List { /// /// * there are no duplicate ids, /// * length of this list is in sync with `CounterForListNodes`, - /// * and sanity-checks all bags. This will cascade down all the checks and makes sure all bags - /// are checked per *any* update to `List`. + /// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure + /// all bags and nodes are checked per *any* update to `List`. 
#[cfg(feature = "std")] pub(crate) fn sanity_check() -> Result<(), &'static str> { use frame_support::ensure; @@ -414,7 +414,6 @@ impl List { let thresholds = T::BagThresholds::get().iter().copied(); let thresholds: Vec = if thresholds.clone().last() == Some(VoteWeight::MAX) { // in the event that they included it, we don't need to make any changes - // Box::new(thresholds.collect() thresholds.collect() } else { // otherwise, insert it here. @@ -774,10 +773,13 @@ impl Node { "node does not exist in the expected bag" ); + let non_terminal_check = !self.is_terminal() && + expected_bag.head.as_ref() != Some(id) && + expected_bag.tail.as_ref() != Some(id); + let terminal_check = + expected_bag.head.as_ref() == Some(id) || expected_bag.tail.as_ref() == Some(id); frame_support::ensure!( - !self.is_terminal() || - expected_bag.head.as_ref() == Some(id) || - expected_bag.tail.as_ref() == Some(id), + non_terminal_check || terminal_check, "a terminal node is neither its bag head or tail" ); diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index a6ab35896b1e7..45eb1d85abe3c 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -101,12 +101,13 @@ pub(crate) const GENESIS_IDS: [(AccountId, VoteWeight); 4] = [(1, 10), (2, 1_000), (3, 1_000), (4, 1_000)]; #[derive(Default)] -pub(crate) struct ExtBuilder { +pub struct ExtBuilder { ids: Vec<(AccountId, VoteWeight)>, } impl ExtBuilder { /// Add some AccountIds to insert into `List`. + #[cfg(test)] pub(crate) fn add_ids(mut self, ids: Vec<(AccountId, VoteWeight)>) -> Self { self.ids = ids; self @@ -126,18 +127,20 @@ impl ExtBuilder { ext } - pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); List::::sanity_check().expect("Sanity check post condition failed") }) } + #[cfg(test)] pub(crate) fn build_and_execute_no_post_check(self, test: impl FnOnce() -> ()) { self.build().execute_with(test) } } +#[cfg(test)] pub(crate) mod test_utils { use super::*; use list::Bag; From dc29596e7c90e84e90198dee24c296e930b58c33 Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 1 Nov 2021 17:46:32 +0900 Subject: [PATCH 030/162] Strip out control codes from the logged messages (#10081) * Strip out control codes from the logged messages * Also strip away C1 control codes * Add extra comments * Clear the buffer after flushing; rename `write` to `flush` * Move control code stripping into its own function * Also strip out control codes from panic messages * Also strip out Unicode left-to-right/right-to-left control codes --- Cargo.lock | 2 + client/tracing/src/logging/event_format.rs | 65 +++++++++++++++------- client/tracing/src/logging/mod.rs | 32 +++++++++++ primitives/panic-handler/Cargo.toml | 2 + primitives/panic-handler/src/lib.rs | 61 ++++++++++++++++++++ 5 files changed, 142 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3434e01d13cd2..1ddf355c890d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9462,6 +9462,8 @@ name = "sp-panic-handler" version = "3.0.0" dependencies = [ "backtrace", + "lazy_static", + "regex", ] [[package]] diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index 5a21192d69c4d..944901d803bda 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -62,7 +62,7 @@ where S: Subscriber + for<'a> LookupSpan<'a>, N: for<'a> FormatFields<'a> + 'static, { - let writer = &mut 
MaybeColorWriter::new(self.enable_color, writer); + let writer = &mut ControlCodeSanitizer::new(!self.enable_color, writer); let normalized_meta = event.normalized_metadata(); let meta = normalized_meta.as_ref().unwrap_or_else(|| event.metadata()); time::write(&self.timer, writer, self.enable_color)?; @@ -100,10 +100,18 @@ where } } + // The writer only sanitizes its output once it's flushed, so if we don't actually need + // to sanitize everything we need to flush out what was already buffered as-is and only + // force-sanitize what follows. + if !writer.sanitize { + writer.flush()?; + writer.sanitize = true; + } + ctx.format_fields(writer, event)?; writeln!(writer)?; - writer.write() + writer.flush() } } @@ -294,43 +302,60 @@ where } } -/// A writer that may write to `inner_writer` with colors. +/// A writer which (optionally) strips out terminal control codes from the logs. /// -/// This is used by [`EventFormat`] to kill colors when `enable_color` is `false`. +/// This is used by [`EventFormat`] to sanitize the log messages. /// -/// It is required to call [`MaybeColorWriter::write`] after all writes are done, +/// It is required to call [`ControlCodeSanitizer::flush`] after all writes are done, /// because the content of these writes is buffered and will only be written to the /// `inner_writer` at that point. -struct MaybeColorWriter<'a> { - enable_color: bool, +struct ControlCodeSanitizer<'a> { + sanitize: bool, buffer: String, inner_writer: &'a mut dyn fmt::Write, } -impl<'a> fmt::Write for MaybeColorWriter<'a> { +impl<'a> fmt::Write for ControlCodeSanitizer<'a> { fn write_str(&mut self, buf: &str) -> fmt::Result { self.buffer.push_str(buf); Ok(()) } } -impl<'a> MaybeColorWriter<'a> { +// NOTE: When making any changes here make sure to also change this function in `sp-panic-handler`. +fn strip_control_codes(input: &str) -> std::borrow::Cow { + lazy_static::lazy_static! { + static ref RE: Regex = Regex::new(r#"(?x) + \x1b\[[^m]+m| # VT100 escape codes + [ + \x00-\x09\x0B-\x1F # ASCII control codes / Unicode C0 control codes, except \n + \x7F # ASCII delete + \u{80}-\u{9F} # Unicode C1 control codes + \u{202A}-\u{202E} # Unicode left-to-right / right-to-left control characters + \u{2066}-\u{2069} # Same as above + ] + "#).expect("regex parsing doesn't fail; qed"); + } + + RE.replace_all(input, "") +} + +impl<'a> ControlCodeSanitizer<'a> { /// Creates a new instance. - fn new(enable_color: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { - Self { enable_color, inner_writer, buffer: String::new() } + fn new(sanitize: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { + Self { sanitize, inner_writer, buffer: String::new() } } /// Write the buffered content to the `inner_writer`. - fn write(&mut self) -> fmt::Result { - lazy_static::lazy_static! { - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); - } - - if !self.enable_color { - let replaced = RE.replace_all(&self.buffer, ""); - self.inner_writer.write_str(&replaced) + fn flush(&mut self) -> fmt::Result { + if self.sanitize { + let replaced = strip_control_codes(&self.buffer); + self.inner_writer.write_str(&replaced)? } else { - self.inner_writer.write_str(&self.buffer) + self.inner_writer.write_str(&self.buffer)? 
} + + self.buffer.clear(); + Ok(()) } } diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 7f995615a223b..49807098d1cf4 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -595,4 +595,36 @@ mod tests { assert!(stderr.contains(&line)); } } + + #[test] + fn control_characters_are_always_stripped_out_from_the_log_messages() { + const RAW_LINE: &str = "$$START$$\x1B[1;32mIn\u{202a}\u{202e}\u{2066}\u{2069}ner\n\r\x7ftext!\u{80}\u{9f}\x1B[0m$$END$$"; + const SANITIZED_LINE: &str = "$$START$$Inner\ntext!$$END$$"; + + let output = run_test_in_another_process( + "control_characters_are_always_stripped_out_from_the_log_messages", + || { + std::env::set_var("RUST_LOG", "trace"); + let mut builder = LoggerBuilder::new(""); + builder.with_colors(true); + builder.init().unwrap(); + log::error!("{}", RAW_LINE); + }, + ); + + if let Some(output) = output { + let stderr = String::from_utf8(output.stderr).unwrap(); + // The log messages should always be sanitized. + assert!(!stderr.contains(RAW_LINE)); + assert!(stderr.contains(SANITIZED_LINE)); + + // The part where the timestamp, the logging level, etc. is printed out doesn't + // always have to be sanitized unless it's necessary, and here it shouldn't be. + assert!(stderr.contains("\x1B[31mERROR\x1B[0m")); + + // Make sure the logs aren't being duplicated. + assert_eq!(stderr.find("ERROR"), stderr.rfind("ERROR")); + assert_eq!(stderr.find(SANITIZED_LINE), stderr.rfind(SANITIZED_LINE)); + } + } } diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index c961d5b089abb..890cc277bd849 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -15,3 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.38" +regex = "1.5.4" +lazy_static = "1.4.0" diff --git a/primitives/panic-handler/src/lib.rs b/primitives/panic-handler/src/lib.rs index 75b057cebf3e4..eddb31a7f22ff 100644 --- a/primitives/panic-handler/src/lib.rs +++ b/primitives/panic-handler/src/lib.rs @@ -25,6 +25,7 @@ //! temporarily be disabled by using an [`AbortGuard`]. use backtrace::Backtrace; +use regex::Regex; use std::{ cell::Cell, io::{self, Write}, @@ -125,6 +126,24 @@ impl Drop for AbortGuard { } } +// NOTE: When making any changes here make sure to also change this function in `sc-tracing`. +fn strip_control_codes(input: &str) -> std::borrow::Cow { + lazy_static::lazy_static! { + static ref RE: Regex = Regex::new(r#"(?x) + \x1b\[[^m]+m| # VT100 escape codes + [ + \x00-\x09\x0B-\x1F # ASCII control codes / Unicode C0 control codes, except \n + \x7F # ASCII delete + \u{80}-\u{9F} # Unicode C1 control codes + \u{202A}-\u{202E} # Unicode left-to-right / right-to-left control characters + \u{2066}-\u{2069} # Same as above + ] + "#).expect("regex parsing doesn't fail; qed"); + } + + RE.replace_all(input, "") +} + /// Function being called when a panic happens. 
fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { let location = info.location(); @@ -139,6 +158,8 @@ fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { }, }; + let msg = strip_control_codes(&msg); + let thread = thread::current(); let name = thread.name().unwrap_or(""); @@ -181,4 +202,44 @@ mod tests { let _guard = AbortGuard::force_abort(); std::panic::catch_unwind(|| panic!()).ok(); } + + fn run_test_in_another_process( + test_name: &str, + test_body: impl FnOnce(), + ) -> Option { + if std::env::var("RUN_FORKED_TEST").is_ok() { + test_body(); + None + } else { + let output = std::process::Command::new(std::env::current_exe().unwrap()) + .arg(test_name) + .env("RUN_FORKED_TEST", "1") + .output() + .unwrap(); + + assert!(output.status.success()); + Some(output) + } + } + + #[test] + fn control_characters_are_always_stripped_out_from_the_panic_messages() { + const RAW_LINE: &str = "$$START$$\x1B[1;32mIn\u{202a}\u{202e}\u{2066}\u{2069}ner\n\r\x7ftext!\u{80}\u{9f}\x1B[0m$$END$$"; + const SANITIZED_LINE: &str = "$$START$$Inner\ntext!$$END$$"; + + let output = run_test_in_another_process( + "control_characters_are_always_stripped_out_from_the_panic_messages", + || { + set("test", "1.2.3"); + let _guard = AbortGuard::force_unwind(); + let _ = std::panic::catch_unwind(|| panic!("{}", RAW_LINE)); + }, + ); + + if let Some(output) = output { + let stderr = String::from_utf8(output.stderr).unwrap(); + assert!(!stderr.contains(RAW_LINE)); + assert!(stderr.contains(SANITIZED_LINE)); + } + } } From efb72f6306e693ef3b653a52a354e048409baa26 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 1 Nov 2021 10:17:50 +0100 Subject: [PATCH 031/162] Fix transaction pool rejection (#10138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix transaction rejection * Update client/service/src/lib.rs Co-authored-by: Bastian Köcher --- client/service/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index a1ff8da4085c9..ce77be5a7c1d9 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -498,7 +498,7 @@ where fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture { if !self.imports_external_transactions { debug!("Transaction rejected"); - Box::pin(futures::future::ready(TransactionImport::None)); + return Box::pin(futures::future::ready(TransactionImport::None)) } let encoded = transaction.encode(); From 5fa8dc0f126a4e3c9d5bf02db0fd4206f953403d Mon Sep 17 00:00:00 2001 From: icodezjb <8869892+icodezjb@users.noreply.github.com> Date: Mon, 1 Nov 2021 20:58:18 +0800 Subject: [PATCH 032/162] Speed up big chainspec json(~1.5 GB) load (#10137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Speed up chainspec json load * Update client/chain-spec/src/chain_spec.rs * Update client/chain-spec/src/chain_spec.rs * Update client/chain-spec/src/chain_spec.rs * Load the chainspec through `mmap` Co-authored-by: icodezjb Co-authored-by: Bastian Köcher Co-authored-by: Jan Bujak --- Cargo.lock | 16 +++++++++++++--- client/chain-spec/Cargo.toml | 1 + client/chain-spec/src/chain_spec.rs | 12 +++++++++++- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ddf355c890d1..4774bfda1f7a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1648,7 +1648,7 @@ checksum = "42276e3f205fe63887cca255aa9a65a63fb72764c30b9a6252a7c7e46994f689" dependencies = [ "byteorder", 
"dynasm", - "memmap2", + "memmap2 0.2.1", ] [[package]] @@ -4097,6 +4097,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.6.4" @@ -6174,7 +6183,7 @@ dependencies = [ "libc", "log 0.4.14", "lz4", - "memmap2", + "memmap2 0.2.1", "parking_lot 0.11.1", "rand 0.8.4", "snap", @@ -7503,6 +7512,7 @@ name = "sc-chain-spec" version = "4.0.0-dev" dependencies = [ "impl-trait-for-tuples", + "memmap2 0.5.0", "parity-scale-codec", "sc-chain-spec-derive", "sc-network", @@ -11285,7 +11295,7 @@ dependencies = [ "backtrace", "bincode", "lazy_static", - "memmap2", + "memmap2 0.2.1", "more-asserts", "rustc-demangle", "serde", diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 78062e600c3f5..ba9655261923b 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -22,3 +22,4 @@ serde_json = "1.0.68" sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } codec = { package = "parity-scale-codec", version = "2.0.0" } +memmap2 = "0.5.0" diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index c4db6158125b1..2ddb56a0df845 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -285,10 +285,20 @@ impl ChainSpec { /// Parse json file into a `ChainSpec` pub fn from_json_file(path: PathBuf) -> Result { + // We mmap the file into memory first, as this is *a lot* faster than using + // `serde_json::from_reader`. See https://github.com/serde-rs/json/issues/160 let file = File::open(&path) .map_err(|e| format!("Error opening spec file `{}`: {}", path.display(), e))?; + + // SAFETY: `mmap` is fundamentally unsafe since technically the file can change + // underneath us while it is mapped; in practice it's unlikely to be a problem + let bytes = unsafe { + memmap2::Mmap::map(&file) + .map_err(|e| format!("Error mmaping spec file `{}`: {}", path.display(), e))? 
+ }; + let client_spec = - json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; + json::from_slice(&bytes).map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(ChainSpec { client_spec, genesis: GenesisSource::File(path) }) } } From 4b40c2aa05930a0aae5a7eb0c50a596a44eb6fb1 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 1 Nov 2021 14:41:38 +0000 Subject: [PATCH 033/162] introduce remote-tests for pallet-bags-list (#10036) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * make a few primitives in bags-list public * push new stuff * update * update log target * bring remote tests here * revert pub * Update frame/bags-list/remote-tests/Cargo.toml Co-authored-by: Bastian Köcher * some rev commnets * Fix * cleanup * Update Cargo.lock Co-authored-by: Bastian Köcher Co-authored-by: Shawn Tabrizi --- Cargo.lock | 29 +++- Cargo.toml | 1 + frame/bags-list/remote-tests/Cargo.toml | 37 +++++ frame/bags-list/remote-tests/src/lib.rs | 134 ++++++++++++++++++ frame/bags-list/remote-tests/src/migration.rs | 65 +++++++++ .../remote-tests/src/sanity_check.rs | 54 +++++++ frame/bags-list/remote-tests/src/snapshot.rs | 86 +++++++++++ frame/bags-list/src/lib.rs | 6 +- frame/bags-list/src/list/mod.rs | 6 +- frame/staking/src/pallet/impls.rs | 13 +- utils/frame/remote-externalities/src/lib.rs | 4 +- 11 files changed, 420 insertions(+), 15 deletions(-) create mode 100644 frame/bags-list/remote-tests/Cargo.toml create mode 100644 frame/bags-list/remote-tests/src/lib.rs create mode 100644 frame/bags-list/remote-tests/src/migration.rs create mode 100644 frame/bags-list/remote-tests/src/sanity_check.rs create mode 100644 frame/bags-list/remote-tests/src/snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index 4774bfda1f7a7..9686bf426fd26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5199,6 +5199,27 @@ dependencies = [ "rand 0.8.4", ] +[[package]] +name = "pallet-bags-list-remote-tests" +version = "4.0.0-dev" +dependencies = [ + "clap", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log 0.4.14", + "pallet-bags-list", + "pallet-staking", + "remote-externalities", + "sp-core", + "sp-runtime", + "sp-std", + "sp-storage", + "sp-tracing", + "structopt", + "tokio", +] + [[package]] name = "pallet-balances" version = "4.0.0-dev" @@ -9858,9 +9879,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structopt" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" dependencies = [ "clap", "lazy_static", @@ -9869,9 +9890,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.14" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" +checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" dependencies = [ "heck", "proc-macro-error 1.0.4", diff --git a/Cargo.toml b/Cargo.toml index 743e0f7066647..197b156dea2ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/utility", "frame/vesting", "frame/bags-list", + "frame/bags-list/remote-tests", "frame/bags-list/fuzzer", "primitives/api", "primitives/api/proc-macro", diff --git 
a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml new file mode 100644 index 0000000000000..c670178c6188a --- /dev/null +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "pallet-bags-list-remote-tests" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet bags list remote test" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# frame +pallet-staking = { path = "../../staking" } +pallet-bags-list = { path = "../../bags-list" } +frame-election-provider-support = { path = "../../election-provider-support" } +frame-system = { path = "../../system" } +frame-support = { path = "../../support" } + +# core +sp-storage = { path = "../../../primitives/storage" } +sp-core = { path = "../../../primitives/core" } +sp-tracing = { path = "../../../primitives/tracing" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-std = { path = "../../../primitives/std" } + +# utils +remote-externalities = { path = "../../../utils/frame/remote-externalities" } + +# others +tokio = { version = "1", features = ["macros"] } +log = "0.4.14" +structopt = "0.3.23" +clap = "2.33.3" diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs new file mode 100644 index 0000000000000..e471c4c95bdbc --- /dev/null +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Utilities for remote-testing pallet-bags-list. + +use sp_std::convert::TryInto; + +/// A common log target to use. +pub const LOG_TARGET: &'static str = "runtime::bags-list::remote-tests"; + +pub mod migration; +pub mod sanity_check; +pub mod snapshot; + +/// A wrapper for a runtime that the functions of this crate expect. +/// +/// For example, this can be the `Runtime` type of the Polkadot runtime. +pub trait RuntimeT: + pallet_staking::Config + pallet_bags_list::Config + frame_system::Config +{ +} +impl RuntimeT for T {} + +fn percent(portion: u32, total: u32) -> f64 { + (portion as f64 / total as f64) * 100f64 +} + +/// Display the number of nodes in each bag, while identifying those that need a rebag. +pub fn display_and_check_bags(currency_unit: u64, currency_name: &'static str) { + use frame_election_provider_support::SortedListProvider; + use frame_support::traits::Get; + + let min_nominator_bond = >::get(); + log::info!(target: LOG_TARGET, "min nominator bond is {:?}", min_nominator_bond); + + let voter_list_count = ::SortedListProvider::count(); + + // go through every bag to track the total number of voters within bags and log some info about + // how voters are distributed within the bags. 
+ let mut seen_in_bags = 0; + let mut rebaggable = 0; + let mut active_bags = 0; + for vote_weight_thresh in ::BagThresholds::get() { + // threshold in terms of UNITS (e.g. KSM, DOT etc) + let vote_weight_thresh_as_unit = *vote_weight_thresh as f64 / currency_unit as f64; + let pretty_thresh = format!("Threshold: {}. {}", vote_weight_thresh_as_unit, currency_name); + + let bag = match pallet_bags_list::Pallet::::list_bags_get(*vote_weight_thresh) { + Some(bag) => bag, + None => { + log::info!(target: LOG_TARGET, "{} NO VOTERS.", pretty_thresh); + continue + }, + }; + + active_bags += 1; + + for id in bag.std_iter().map(|node| node.std_id().clone()) { + let vote_weight = pallet_staking::Pallet::::weight_of(&id); + let vote_weight_as_balance: pallet_staking::BalanceOf = + vote_weight.try_into().map_err(|_| "can't convert").unwrap(); + + if vote_weight_as_balance < min_nominator_bond { + log::trace!( + target: LOG_TARGET, + "⚠️ {} Account found below min bond: {:?}.", + pretty_thresh, + id + ); + } + + let node = + pallet_bags_list::Node::::get(&id).expect("node in bag must exist."); + if node.is_misplaced(vote_weight) { + rebaggable += 1; + log::trace!( + target: LOG_TARGET, + "Account {:?} can be rebagged from {:?} to {:?}", + id, + vote_weight_thresh_as_unit, + pallet_bags_list::notional_bag_for::(vote_weight) as f64 / + currency_unit as f64 + ); + } + } + + // update our overall counter + let voters_in_bag = bag.std_iter().count() as u32; + seen_in_bags += voters_in_bag; + + // percentage of all nominators + let percent_of_voters = percent(voters_in_bag, voter_list_count); + + log::info!( + target: LOG_TARGET, + "{} Nominators: {} [%{:.3}]", + pretty_thresh, + voters_in_bag, + percent_of_voters, + ); + } + + if seen_in_bags != voter_list_count { + log::error!( + target: LOG_TARGET, + "bags list population ({}) not on par whoever is voter_list ({})", + seen_in_bags, + voter_list_count, + ) + } + + log::info!( + target: LOG_TARGET, + "a total of {} nodes are in {} active bags [{} total bags], {} of which can be rebagged.", + voter_list_count, + active_bags, + ::BagThresholds::get().len(), + rebaggable, + ); +} diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs new file mode 100644 index 0000000000000..1e977011f1439 --- /dev/null +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -0,0 +1,65 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test to check the migration of the voter bag. + +use crate::{RuntimeT, LOG_TARGET}; +use frame_election_provider_support::SortedListProvider; +use frame_support::traits::PalletInfoAccess; +use pallet_staking::Nominators; +use remote_externalities::{Builder, Mode, OnlineConfig}; +use sp_runtime::traits::Block as BlockT; + +/// Test voter bags migration. `currency_unit` is the number of planks per the the runtimes `UNITS` +/// (i.e. 
number of decimal places per DOT, KSM etc) +pub async fn execute( + currency_unit: u64, + currency_name: &'static str, + ws_url: String, +) { + let mut ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: ws_url.to_string().into(), + pallets: vec![pallet_staking::Pallet::::name().to_string()], + at: None, + state_snapshot: None, + })) + .build() + .await + .unwrap(); + + ext.execute_with(|| { + // get the nominator & validator count prior to migrating; these should be invariant. + let pre_migrate_nominator_count = >::iter().count() as u32; + log::info!(target: LOG_TARGET, "Nominator count: {}", pre_migrate_nominator_count); + + // run the actual migration, + let moved = ::SortedListProvider::regenerate( + pallet_staking::Nominators::::iter().map(|(n, _)| n), + pallet_staking::Pallet::::weight_of_fn(), + ); + log::info!(target: LOG_TARGET, "Moved {} nominators", moved); + + let voter_list_len = + ::SortedListProvider::iter().count() as u32; + let voter_list_count = ::SortedListProvider::count(); + // and confirm it is equal to the length of the `VoterList`. + assert_eq!(pre_migrate_nominator_count, voter_list_len); + assert_eq!(pre_migrate_nominator_count, voter_list_count); + + crate::display_and_check_bags::(currency_unit, currency_name); + }); +} diff --git a/frame/bags-list/remote-tests/src/sanity_check.rs b/frame/bags-list/remote-tests/src/sanity_check.rs new file mode 100644 index 0000000000000..e5e9f45bac5f4 --- /dev/null +++ b/frame/bags-list/remote-tests/src/sanity_check.rs @@ -0,0 +1,54 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test to execute the sanity-check of the voter bag. + +use frame_election_provider_support::SortedListProvider; +use frame_support::{ + storage::generator::StorageMap, + traits::{Get, PalletInfoAccess}, +}; +use remote_externalities::{Builder, Mode, OnlineConfig}; +use sp_runtime::traits::Block as BlockT; +use sp_std::convert::TryInto; + +/// Execute the sanity check of the bags-list. 
+pub async fn execute( + currency_unit: u64, + currency_name: &'static str, + ws_url: String, +) { + let mut ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: ws_url.to_string().into(), + pallets: vec![pallet_bags_list::Pallet::::name().to_string()], + at: None, + state_snapshot: None, + })) + .inject_hashed_prefix(&>::prefix_hash()) + .inject_hashed_prefix(&>::prefix_hash()) + .build() + .await + .unwrap(); + + ext.execute_with(|| { + sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); + pallet_bags_list::Pallet::::sanity_check().unwrap(); + log::info!(target: crate::LOG_TARGET, "executed bags-list sanity check with no errors."); + + crate::display_and_check_bags::(currency_unit, currency_name); + }); +} diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs new file mode 100644 index 0000000000000..6e186a65cb2b9 --- /dev/null +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -0,0 +1,86 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test to execute the snapshot using the voter bag. + +use frame_support::traits::PalletInfoAccess; +use remote_externalities::{Builder, Mode, OnlineConfig}; +use sp_runtime::traits::Block as BlockT; + +/// Execute create a snapshot from pallet-staking. +pub async fn execute( + voter_limit: Option, + currency_unit: u64, + ws_url: String, +) { + use frame_support::storage::generator::StorageMap; + let mut ext = Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: ws_url.to_string().into(), + // NOTE: we don't scrape pallet-staking, this kinda ensures that the source of the data + // is bags-list. 
+ pallets: vec![pallet_bags_list::Pallet::::name().to_string()], + at: None, + state_snapshot: None, + })) + .inject_hashed_prefix(&>::prefix_hash()) + .inject_hashed_prefix(&>::prefix_hash()) + .inject_hashed_prefix(&>::prefix_hash()) + .inject_hashed_prefix(&>::prefix_hash()) + .inject_hashed_key(&>::hashed_key()) + .inject_hashed_key(&>::hashed_key()) + .build() + .await + .unwrap(); + + ext.execute_with(|| { + use frame_election_provider_support::{ElectionDataProvider, SortedListProvider}; + log::info!( + target: crate::LOG_TARGET, + "{} nodes in bags list.", + ::SortedListProvider::count(), + ); + + let voters = as ElectionDataProvider< + Runtime::AccountId, + Runtime::BlockNumber, + >>::voters(voter_limit) + .unwrap(); + + let mut voters_nominator_only = voters + .iter() + .filter(|(v, _, _)| pallet_staking::Nominators::::contains_key(v)) + .cloned() + .collect::>(); + voters_nominator_only.sort_by_key(|(_, w, _)| *w); + + let currency_unit = currency_unit as f64; + let min_voter = voters_nominator_only + .first() + .map(|(x, y, _)| (x.clone(), *y as f64 / currency_unit)); + let max_voter = voters_nominator_only + .last() + .map(|(x, y, _)| (x.clone(), *y as f64 / currency_unit)); + log::info!( + target: crate::LOG_TARGET, + "a snapshot with limit {:?} has been created, {} voters are taken. min nominator: {:?}, max: {:?}", + voter_limit, + voters.len(), + min_voter, + max_voter + ); + }); +} diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 10a692e8b3f95..b7f96799e459f 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -65,12 +65,10 @@ pub mod mock; mod tests; pub mod weights; +pub use list::{notional_bag_for, Bag, Error, List, Node}; pub use pallet::*; pub use weights::WeightInfo; -pub use list::Error; -use list::List; - pub(crate) const LOG_TARGET: &'static str = "runtime::bags_list"; // syntactic sugar for logging. @@ -155,7 +153,7 @@ pub mod pallet { /// How many ids are registered. // NOTE: This is merely a counter for `ListNodes`. It should someday be replaced by the - // `CountedMaop` storage. + // `CountedMap` storage. #[pallet::storage] pub(crate) type CounterForListNodes = StorageValue<_, u32, ValueQuery>; diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index 057565e645f90..4efc3163816ff 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -53,7 +53,7 @@ mod tests; /// /// Note that even if the thresholds list does not have `VoteWeight::MAX` as its final member, this /// function behaves as if it does. -pub(crate) fn notional_bag_for(weight: VoteWeight) -> VoteWeight { +pub fn notional_bag_for(weight: VoteWeight) -> VoteWeight { let thresholds = T::BagThresholds::get(); let idx = thresholds.partition_point(|&threshold| weight > threshold); thresholds.get(idx).copied().unwrap_or(VoteWeight::MAX) @@ -690,7 +690,7 @@ pub struct Node { impl Node { /// Get a node by id. - pub(crate) fn get(id: &T::AccountId) -> Option> { + pub fn get(id: &T::AccountId) -> Option> { crate::ListNodes::::try_get(id).ok() } @@ -734,7 +734,7 @@ impl Node { } /// `true` when this voter is in the wrong bag. 
- pub(crate) fn is_misplaced(&self, current_weight: VoteWeight) -> bool { + pub fn is_misplaced(&self, current_weight: VoteWeight) -> bool { notional_bag_for::(current_weight) != self.bag_upper } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 02099d8543d4c..ec34efe397f5a 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -692,6 +692,9 @@ impl Pallet { // track every nominator iterated over, but not necessarily added to `all_voters` let mut nominators_seen = 0u32; + // cache the total-issuance once in this function + let weight_of = Self::weight_of_fn(); + let mut nominators_iter = T::SortedListProvider::iter(); while nominators_taken < nominators_quota && nominators_seen < nominators_quota * 2 { let nominator = match nominators_iter.next() { @@ -705,17 +708,23 @@ impl Pallet { if let Some(Nominations { submitted_in, mut targets, suppressed: _ }) = >::get(&nominator) { + log!( + trace, + "fetched nominator {:?} with weight {:?}", + nominator, + weight_of(&nominator) + ); targets.retain(|stash| { slashing_spans .get(stash) .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) }); if !targets.len().is_zero() { - all_voters.push((nominator.clone(), Self::weight_of(&nominator), targets)); + all_voters.push((nominator.clone(), weight_of(&nominator), targets)); nominators_taken.saturating_inc(); } } else { - log!(error, "invalid item in `SortedListProvider`: {:?}", nominator) + log!(error, "DEFENSIVE: invalid item in `SortedListProvider`: {:?}", nominator) } } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 733ec7c3200ad..3b9e08f75da85 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -385,7 +385,7 @@ impl Builder { }; for prefix in &self.hashed_prefixes { - debug!( + info!( target: LOG_TARGET, "adding data for hashed prefix: {:?}", HexDisplay::from(prefix) @@ -397,7 +397,7 @@ impl Builder { for key in &self.hashed_keys { let key = StorageKey(key.to_vec()); - debug!(target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key)); + info!(target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key)); let value = self.rpc_get_storage(key.clone(), Some(at)).await?; keys_and_values.push((key, value)); } From 0210ea04921021fea86f441ecad5275c487f6a78 Mon Sep 17 00:00:00 2001 From: Albrecht <14820950+weichweich@users.noreply.github.com> Date: Tue, 2 Nov 2021 10:27:44 +0100 Subject: [PATCH 034/162] don't panic if already migrated (#10141) * don't panic if already migrated * remove condition --- frame/vesting/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 27862a5ca4b72..654723d009fab 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -181,7 +181,11 @@ pub mod pallet { impl Hooks> for Pallet { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result<(), &'static str> { - migrations::v1::pre_migrate::() + if StorageVersion::::get() == Releases::V0 { + migrations::v1::pre_migrate::() + } else { + Ok(()) + } } fn on_runtime_upgrade() -> Weight { From 0465b0bb407fe36ea03ae35d5138633e23119aac Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 2 Nov 2021 10:35:23 +0100 Subject: [PATCH 035/162] Hashing proc macro utils (#9875) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * hashing macro * fmt * use in easy place, 
and fix blake sizes * fix * Fixes, docs. Allow ident as input. * fix doc tests * update error in test (nmapkey and key are same type). * hashing crates under sp_core * Doc updates and format. * use all existing hashing functions. * return array of u8 * Update primitives/core/hashing/proc-macro/src/impls.rs Co-authored-by: Bastian Köcher * ToTokeen for an array of u8 * fix * re * Improve impls * complete doc tests * fmt * fix doctest format * fix ui test (nmap key type alias) Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- Cargo.lock | 26 +++ Cargo.toml | 2 + client/executor/Cargo.toml | 1 + client/executor/src/wasm_runtime.rs | 2 +- frame/support/Cargo.toml | 1 + frame/support/src/lib.rs | 6 +- .../storage_info_unsatisfied_nmap.stderr | 4 +- primitives/core/Cargo.toml | 4 + primitives/core/hashing/Cargo.toml | 31 +++ primitives/core/hashing/proc-macro/Cargo.toml | 22 +++ .../core/hashing/proc-macro/src/impls.rs | 124 ++++++++++++ primitives/core/hashing/proc-macro/src/lib.rs | 129 ++++++++++++ primitives/core/hashing/src/lib.rs | 166 ++++++++++++++++ primitives/core/src/hashing.rs | 184 ++++-------------- 14 files changed, 553 insertions(+), 149 deletions(-) create mode 100644 primitives/core/hashing/Cargo.toml create mode 100644 primitives/core/hashing/proc-macro/Cargo.toml create mode 100644 primitives/core/hashing/proc-macro/src/impls.rs create mode 100644 primitives/core/hashing/proc-macro/src/lib.rs create mode 100644 primitives/core/hashing/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9686bf426fd26..20499facb5b71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2051,6 +2051,7 @@ dependencies = [ "smallvec 1.7.0", "sp-arithmetic", "sp-core", + "sp-core-hashing-proc-macro", "sp-inherents", "sp-io", "sp-runtime", @@ -7919,6 +7920,7 @@ dependencies = [ "sc-tracing", "sp-api", "sp-core", + "sp-core-hashing-proc-macro", "sp-externalities", "sp-io", "sp-maybe-compressed-blob", @@ -9303,6 +9305,8 @@ dependencies = [ "serde", "serde_json", "sha2 0.9.8", + "sp-core-hashing", + "sp-core-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", @@ -9319,6 +9323,28 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core-hashing" +version = "4.0.0-dev" +dependencies = [ + "blake2-rfc", + "byteorder", + "sha2 0.9.8", + "sp-std", + "tiny-keccak", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +dependencies = [ + "proc-macro2", + "quote", + "sp-core-hashing", + "syn", +] + [[package]] name = "sp-database" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 197b156dea2ec..32d10ca8978dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -155,6 +155,8 @@ members = [ "primitives/consensus/pow", "primitives/consensus/vrf", "primitives/core", + "primitives/core/hashing", + "primitives/core/hashing/proc-macro", "primitives/database", "primitives/debug-derive", "primitives/externalities", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index c8246d43a0f89..bc55172bc33ef 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -33,6 +33,7 @@ sc-executor-wasmtime = { version = "0.10.0-dev", path = "wasmtime", optional = t parking_lot = "0.11.1" log = "0.4.8" libsecp256k1 = "0.6" +sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../../primitives/core/hashing/proc-macro" } [dev-dependencies] wat = "1.0" diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index c7aa1200719de..204a095717fc4 100644 --- 
a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -344,7 +344,7 @@ fn decode_version(mut version: &[u8]) -> Result { })? .into(); - let core_api_id = sp_core::hashing::blake2_64(b"Core"); + let core_api_id = sp_core_hashing_proc_macro::blake2b_64!(b"Core"); if v.has_api_with(&core_api_id, |v| v >= 3) { sp_api::RuntimeVersion::decode(&mut version).map_err(|_| { WasmError::Instantiation("failed to decode \"Core_version\" result".into()) diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 4bc64d8b8e73a..b62ae3384fe42 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -34,6 +34,7 @@ bitflags = "1.3" impl-trait-for-tuples = "0.2.1" smallvec = "1.7.0" log = { version = "0.4.14", default-features = false } +sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../../primitives/core/hashing/proc-macro" } [dev-dependencies] assert_matches = "1.3.0" diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index d81300a404c4f..6e60988a4ca23 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -42,6 +42,8 @@ pub use scale_info; pub use serde; pub use sp_core::Void; #[doc(hidden)] +pub use sp_core_hashing_proc_macro; +#[doc(hidden)] pub use sp_io::{self, storage::root as storage_root}; #[doc(hidden)] pub use sp_runtime::RuntimeDebug; @@ -427,9 +429,7 @@ macro_rules! parameter_types { /// Returns the key for this parameter type. #[allow(unused)] pub fn key() -> [u8; 16] { - $crate::sp_io::hashing::twox_128( - concat!(":", stringify!($name), ":").as_bytes() - ) + $crate::sp_core_hashing_proc_macro::twox_128!(b":", $name, b":") } /// Set the value of this parameter type in the storage. diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 2b70102fdac24..ffbc5aeea6b4f 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -4,8 +4,8 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied 10 | #[pallet::generate_storage_info] | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | - = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` + = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `NMapKey` + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>` note: required by `storage_info` --> $DIR/storage.rs:71:2 | diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 14a76e2482444..148f2343ee2b7 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -66,6 +66,7 @@ sha2 = { version = "0.9.8", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.6.1", default-features = false, optional = true } libsecp256k1 = { version = "0.6", default-features = false, features = ["hmac", "static-context"], optional = true } +sp-core-hashing = { version = "4.0.0-dev", path = "./hashing", default-features = false, optional = true } merlin = { version = "2.0", 
default-features = false, optional = true } ss58-registry = "1.0.0" sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } @@ -76,6 +77,7 @@ hex-literal = "0.3.3" rand = "0.7.2" criterion = "0.3.3" serde_json = "1.0" +sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "./hashing/proc-macro" } [[bench]] name = "bench" @@ -118,6 +120,7 @@ std = [ "regex", "num-traits/std", "tiny-keccak", + "sp-core-hashing/std", "sp-debug-derive/std", "sp-externalities", "sp-storage/std", @@ -142,6 +145,7 @@ full_crypto = [ "sha2", "twox-hash", "libsecp256k1", + "sp-core-hashing", "sp-runtime-interface/disable_target_static_assertions", "merlin", ] diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml new file mode 100644 index 0000000000000..43c670b59b0a7 --- /dev/null +++ b/primitives/core/hashing/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "sp-core-hashing" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Primitive core crate hashing implementation." +documentation = "https://docs.rs/sp-core-hashing" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../std" } +byteorder = { version = "1.3.2", default-features = false } + +blake2-rfc = { version = "0.2.18", default-features = false } +tiny-keccak = { version = "2.0.1", features = ["keccak"] } +sha2 = { version = "0.9.2", default-features = false } +twox-hash = { version = "1.5.0", default-features = false } + +[features] +default = ["std"] +std = [ + "blake2-rfc/std", + "sha2/std", + "sp-std/std", + "twox-hash/std", +] diff --git a/primitives/core/hashing/proc-macro/Cargo.toml b/primitives/core/hashing/proc-macro/Cargo.toml new file mode 100644 index 0000000000000..6d83b50b8a296 --- /dev/null +++ b/primitives/core/hashing/proc-macro/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "sp-core-hashing-proc-macro" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "This crate provides procedural macros for calculating static hash." +documentation = "https://docs.rs/sp-core-hashing-proc-macro" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "1.0.77", features = ["full", "parsing"] } +quote = "1.0.6" +proc-macro2 = "1.0.29" +sp-core-hashing = { version = "4.0.0-dev", path = "../", default-features = false } diff --git a/primitives/core/hashing/proc-macro/src/impls.rs b/primitives/core/hashing/proc-macro/src/impls.rs new file mode 100644 index 0000000000000..0ce388762aa30 --- /dev/null +++ b/primitives/core/hashing/proc-macro/src/impls.rs @@ -0,0 +1,124 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use quote::quote; +use syn::parse::{Parse, ParseStream}; + +use proc_macro::TokenStream; + +pub(super) struct InputBytes(pub Vec); + +pub(super) struct MultipleInputBytes(pub Vec>); + +impl MultipleInputBytes { + pub(super) fn concatenated(mut self) -> Vec { + if self.0.len() == 0 { + Vec::new() + } else { + let mut result = core::mem::take(&mut self.0[0]); + for other in self.0[1..].iter_mut() { + result.append(other); + } + result + } + } +} + +impl Parse for InputBytes { + fn parse(input: ParseStream) -> syn::Result { + match syn::ExprArray::parse(input) { + Ok(array) => { + let mut bytes = Vec::::new(); + for expr in array.elems.iter() { + match expr { + syn::Expr::Lit(lit) => match &lit.lit { + syn::Lit::Int(b) => bytes.push(b.base10_parse()?), + syn::Lit::Byte(b) => bytes.push(b.value()), + _ => + return Err(syn::Error::new( + input.span(), + "Expected array of u8 elements.".to_string(), + )), + }, + _ => + return Err(syn::Error::new( + input.span(), + "Expected array of u8 elements.".to_string(), + )), + } + } + return Ok(InputBytes(bytes)) + }, + Err(_e) => (), + } + // use rust names as a vec of their utf8 bytecode. + match syn::Ident::parse(input) { + Ok(ident) => return Ok(InputBytes(ident.to_string().as_bytes().to_vec())), + Err(_e) => (), + } + Ok(InputBytes(syn::LitByteStr::parse(input)?.value())) + } +} + +impl Parse for MultipleInputBytes { + fn parse(input: ParseStream) -> syn::Result { + let elts = + syn::punctuated::Punctuated::::parse_terminated(input)?; + Ok(MultipleInputBytes(elts.into_iter().map(|elt| elt.0).collect())) + } +} + +pub(super) fn twox_64(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::twox_64(bytes.as_slice())) +} + +pub(super) fn twox_128(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::twox_128(bytes.as_slice())) +} + +pub(super) fn blake2b_512(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::blake2_512(bytes.as_slice())) +} + +pub(super) fn blake2b_256(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::blake2_256(bytes.as_slice())) +} + +pub(super) fn blake2b_64(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::blake2_64(bytes.as_slice())) +} + +pub(super) fn keccak_256(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::keccak_256(bytes.as_slice())) +} + +pub(super) fn keccak_512(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::keccak_512(bytes.as_slice())) +} + +pub(super) fn sha2_256(bytes: Vec) -> TokenStream { + bytes_to_array(sp_core_hashing::sha2_256(bytes.as_slice())) +} + +fn bytes_to_array(bytes: impl IntoIterator) -> TokenStream { + let bytes = bytes.into_iter(); + + quote!( + [ #( #bytes ),* ] + ) + .into() +} diff --git a/primitives/core/hashing/proc-macro/src/lib.rs b/primitives/core/hashing/proc-macro/src/lib.rs new file mode 100644 index 0000000000000..2af8554f4ece9 --- /dev/null +++ b/primitives/core/hashing/proc-macro/src/lib.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Macros to calculate constant hash byte results at compile time.
+//!
+//! Macros from this crate apply a specific hash function to their input.
+//! The input can be a literal byte string such as `b"content"` or an array
+//! of bytes such as `[1, 2, 3]`.
+//! A Rust identifier can also be used; in that case the utf8 bytes of its
+//! name are hashed, for instance if the ident is `MyStruct`, then
+//! `b"MyStruct"` will be hashed.
+//! If multiple comma-separated arguments are passed, they are concatenated
+//! and then hashed.
+//!
+//! Examples:
+//!
+//! ```rust
+//! assert_eq!(
+//! 	sp_core_hashing_proc_macro::blake2b_256!(b"test"),
+//! 	sp_core_hashing::blake2_256(b"test"),
+//! );
+//! assert_eq!(
+//! 	sp_core_hashing_proc_macro::blake2b_256!([1u8]),
+//! 	sp_core_hashing::blake2_256(&[1u8]),
+//! );
+//! assert_eq!(
+//! 	sp_core_hashing_proc_macro::blake2b_256!([1, 2, 3]),
+//! 	sp_core_hashing::blake2_256(&[1, 2, 3]),
+//! );
+//! assert_eq!(
+//! 	sp_core_hashing_proc_macro::blake2b_256!(identifier),
+//! 	sp_core_hashing::blake2_256(b"identifier"),
+//! );
+//! assert_eq!(
+//! 	sp_core_hashing_proc_macro::blake2b_256!(identifier, b"/string"),
+//! 	sp_core_hashing::blake2_256(b"identifier/string"),
+//! );
+//! ```

+mod impls;
+
+use impls::MultipleInputBytes;
+use proc_macro::TokenStream;
+
+/// Apply a Blake2 64-bit hash on its bytes parameter, outputs a `[u8; 8]`.
+/// Multiple inputs are concatenated before hashing.
+/// Input can be an identifier (its name as bytes is used), a byte string or an
+/// array of bytes.
+#[proc_macro]
+pub fn blake2b_64(input: TokenStream) -> TokenStream {
+	impls::blake2b_64(syn::parse_macro_input!(input as MultipleInputBytes).concatenated())
+}
+
+/// Apply a Blake2 256-bit hash on its bytes parameter, outputs a `[u8; 32]`.
+/// Multiple inputs are concatenated before hashing.
+/// Input can be an identifier (its name as bytes is used), a byte string or an
+/// array of bytes.
+#[proc_macro]
+pub fn blake2b_256(input: TokenStream) -> TokenStream {
+	impls::blake2b_256(syn::parse_macro_input!(input as MultipleInputBytes).concatenated())
+}
+
+/// Apply a Blake2 512-bit hash on its bytes parameter, outputs a `[u8; 64]`.
+/// Multiple inputs are concatenated before hashing.
+/// Input can be an identifier (its name as bytes is used), a byte string or an
+/// array of bytes.
+#[proc_macro]
+pub fn blake2b_512(input: TokenStream) -> TokenStream {
+	impls::blake2b_512(syn::parse_macro_input!(input as MultipleInputBytes).concatenated())
+}
+
+/// Apply a XX 64-bit hash on its bytes parameter, outputs a `[u8; 8]`.
+/// Multiple inputs are concatenated before hashing.
+/// Input can be an identifier (its name as bytes is used), a byte string or an
+/// array of bytes.
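+///
+/// For example, `twox_64!(b"")` expands at compile time to the same `[u8; 8]`
+/// that `sp_core_hashing::twox_64(b"")` computes at runtime; this equivalence
+/// is exercised by the tests added to `primitives/core/src/hashing.rs` below.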
+#[proc_macro] +pub fn twox_64(input: TokenStream) -> TokenStream { + impls::twox_64(syn::parse_macro_input!(input as MultipleInputBytes).concatenated()) +} + +/// Apply a XX 128-bit hash on its bytes parameter, outputs a `[u8; 16]`. +/// Multiple inputs are concatenated before hashing. +/// Input can be identifier (name of identifier as bytes is used), byte string or +/// array of bytes. +#[proc_macro] +pub fn twox_128(input: TokenStream) -> TokenStream { + impls::twox_128(syn::parse_macro_input!(input as MultipleInputBytes).concatenated()) +} + +/// Apply a keccak 256-bit hash on its bytes parameter, outputs a `[u8; 32]`. +/// Multiple inputs are concatenated before hashing. +/// Input can be identifier (name of identifier as bytes is used), byte string or +/// array of bytes. +#[proc_macro] +pub fn keccak_256(input: TokenStream) -> TokenStream { + impls::keccak_256(syn::parse_macro_input!(input as MultipleInputBytes).concatenated()) +} + +/// Apply a keccak 512-bit hash on its bytes parameter, outputs a `[u8; 64]`. +/// Multiple inputs are concatenated before hashing. +/// Input can be identifier (name of identifier as bytes is used), byte string or +/// array of bytes. +#[proc_macro] +pub fn keccak_512(input: TokenStream) -> TokenStream { + impls::keccak_512(syn::parse_macro_input!(input as MultipleInputBytes).concatenated()) +} + +/// Apply a sha2 256-bit hash on its bytes parameter, outputs a `[u8; 32]`. +/// Multiple inputs are concatenated before hashing. +/// Input can be identifier (name of identifier as bytes is used), byte string or +/// array of bytes. +#[proc_macro] +pub fn sha2_256(input: TokenStream) -> TokenStream { + impls::sha2_256(syn::parse_macro_input!(input as MultipleInputBytes).concatenated()) +} diff --git a/primitives/core/hashing/src/lib.rs b/primitives/core/hashing/src/lib.rs new file mode 100644 index 0000000000000..f806613c5b545 --- /dev/null +++ b/primitives/core/hashing/src/lib.rs @@ -0,0 +1,166 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Hashing Functions. + +#![warn(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use sha2::{Digest, Sha256}; +use tiny_keccak::{Hasher, Keccak}; + +/// Do a Blake2 512-bit hash and place result in `dest`. +pub fn blake2_512_into(data: &[u8], dest: &mut [u8; 64]) { + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); +} + +/// Do a Blake2 512-bit hash and return result. +pub fn blake2_512(data: &[u8]) -> [u8; 64] { + let mut r = [0; 64]; + blake2_512_into(data, &mut r); + r +} + +/// Do a Blake2 256-bit hash and place result in `dest`. +pub fn blake2_256_into(data: &[u8], dest: &mut [u8; 32]) { + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); +} + +/// Do a Blake2 256-bit hash and return result. 
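+///
+/// Equivalent to `blake2_rfc::blake2b::blake2b(32, &[], data)`, i.e. Blake2b
+/// with a 32-byte digest and no key.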
+pub fn blake2_256(data: &[u8]) -> [u8; 32] { + let mut r = [0; 32]; + blake2_256_into(data, &mut r); + r +} + +/// Do a Blake2 128-bit hash and place result in `dest`. +pub fn blake2_128_into(data: &[u8], dest: &mut [u8; 16]) { + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); +} + +/// Do a Blake2 128-bit hash and return result. +pub fn blake2_128(data: &[u8]) -> [u8; 16] { + let mut r = [0; 16]; + blake2_128_into(data, &mut r); + r +} + +/// Do a Blake2 64-bit hash and place result in `dest`. +pub fn blake2_64_into(data: &[u8], dest: &mut [u8; 8]) { + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], data).as_bytes()); +} + +/// Do a Blake2 64-bit hash and return result. +pub fn blake2_64(data: &[u8]) -> [u8; 8] { + let mut r = [0; 8]; + blake2_64_into(data, &mut r); + r +} + +/// Do a XX 64-bit hash and place result in `dest`. +pub fn twox_64_into(data: &[u8], dest: &mut [u8; 8]) { + use core::hash::Hasher; + let mut h0 = twox_hash::XxHash::with_seed(0); + h0.write(data); + let r0 = h0.finish(); + use byteorder::{ByteOrder, LittleEndian}; + LittleEndian::write_u64(&mut dest[0..8], r0); +} + +/// Do a XX 64-bit hash and return result. +pub fn twox_64(data: &[u8]) -> [u8; 8] { + let mut r: [u8; 8] = [0; 8]; + twox_64_into(data, &mut r); + r +} + +/// Do a XX 128-bit hash and place result in `dest`. +pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { + use core::hash::Hasher; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + h0.write(data); + h1.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + use byteorder::{ByteOrder, LittleEndian}; + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); +} + +/// Do a XX 128-bit hash and return result. +pub fn twox_128(data: &[u8]) -> [u8; 16] { + let mut r: [u8; 16] = [0; 16]; + twox_128_into(data, &mut r); + r +} + +/// Do a XX 256-bit hash and place result in `dest`. +pub fn twox_256_into(data: &[u8], dest: &mut [u8; 32]) { + use ::core::hash::Hasher; + use byteorder::{ByteOrder, LittleEndian}; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + let mut h2 = twox_hash::XxHash::with_seed(2); + let mut h3 = twox_hash::XxHash::with_seed(3); + h0.write(data); + h1.write(data); + h2.write(data); + h3.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + let r2 = h2.finish(); + let r3 = h3.finish(); + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); + LittleEndian::write_u64(&mut dest[16..24], r2); + LittleEndian::write_u64(&mut dest[24..32], r3); +} + +/// Do a XX 256-bit hash and return result. +pub fn twox_256(data: &[u8]) -> [u8; 32] { + let mut r: [u8; 32] = [0; 32]; + twox_256_into(data, &mut r); + r +} + +/// Do a keccak 256-bit hash and return result. +pub fn keccak_256(data: &[u8]) -> [u8; 32] { + let mut keccak = Keccak::v256(); + keccak.update(data); + let mut output = [0u8; 32]; + keccak.finalize(&mut output); + output +} + +/// Do a keccak 512-bit hash and return result. +pub fn keccak_512(data: &[u8]) -> [u8; 64] { + let mut keccak = Keccak::v512(); + keccak.update(data); + let mut output = [0u8; 64]; + keccak.finalize(&mut output); + output +} + +/// Do a sha2 256-bit hash and return result. 
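+///
+/// This is plain SHA-256 via the `sha2` crate; hashing the empty input yields
+/// the well-known digest beginning `e3b0c442...`.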
+pub fn sha2_256(data: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(data); + let mut output = [0u8; 32]; + output.copy_from_slice(&hasher.finalize()); + output +} diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index 4c719f7c69832..092cfc51b9508 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -22,147 +22,45 @@ //! unless you know what you're doing. Using `sp_io` will be more performant, since instead of //! computing the hash in WASM it delegates that computation to the host client. -use sha2::{Digest, Sha256}; -use tiny_keccak::{Hasher, Keccak}; - -/// Do a Blake2 512-bit hash and place result in `dest`. -pub fn blake2_512_into(data: &[u8], dest: &mut [u8; 64]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); -} - -/// Do a Blake2 512-bit hash and return result. -pub fn blake2_512(data: &[u8]) -> [u8; 64] { - let mut r = [0; 64]; - blake2_512_into(data, &mut r); - r -} - -/// Do a Blake2 256-bit hash and place result in `dest`. -pub fn blake2_256_into(data: &[u8], dest: &mut [u8; 32]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); -} - -/// Do a Blake2 256-bit hash and return result. -pub fn blake2_256(data: &[u8]) -> [u8; 32] { - let mut r = [0; 32]; - blake2_256_into(data, &mut r); - r -} - -/// Do a Blake2 128-bit hash and place result in `dest`. -pub fn blake2_128_into(data: &[u8], dest: &mut [u8; 16]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); -} - -/// Do a Blake2 128-bit hash and return result. -pub fn blake2_128(data: &[u8]) -> [u8; 16] { - let mut r = [0; 16]; - blake2_128_into(data, &mut r); - r -} - -/// Do a Blake2 64-bit hash and place result in `dest`. -pub fn blake2_64_into(data: &[u8], dest: &mut [u8; 8]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], data).as_bytes()); -} - -/// Do a Blake2 64-bit hash and return result. -pub fn blake2_64(data: &[u8]) -> [u8; 8] { - let mut r = [0; 8]; - blake2_64_into(data, &mut r); - r -} - -/// Do a XX 64-bit hash and place result in `dest`. -pub fn twox_64_into(data: &[u8], dest: &mut [u8; 8]) { - use core::hash::Hasher; - let mut h0 = twox_hash::XxHash::with_seed(0); - h0.write(data); - let r0 = h0.finish(); - use byteorder::{ByteOrder, LittleEndian}; - LittleEndian::write_u64(&mut dest[0..8], r0); -} - -/// Do a XX 64-bit hash and return result. -pub fn twox_64(data: &[u8]) -> [u8; 8] { - let mut r: [u8; 8] = [0; 8]; - twox_64_into(data, &mut r); - r -} - -/// Do a XX 128-bit hash and place result in `dest`. -pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use core::hash::Hasher; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - h0.write(data); - h1.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - use byteorder::{ByteOrder, LittleEndian}; - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); -} - -/// Do a XX 128-bit hash and return result. -pub fn twox_128(data: &[u8]) -> [u8; 16] { - let mut r: [u8; 16] = [0; 16]; - twox_128_into(data, &mut r); - r -} - -/// Do a XX 256-bit hash and place result in `dest`. 
-pub fn twox_256_into(data: &[u8], dest: &mut [u8; 32]) { - use ::core::hash::Hasher; - use byteorder::{ByteOrder, LittleEndian}; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - let mut h2 = twox_hash::XxHash::with_seed(2); - let mut h3 = twox_hash::XxHash::with_seed(3); - h0.write(data); - h1.write(data); - h2.write(data); - h3.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - let r2 = h2.finish(); - let r3 = h3.finish(); - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); - LittleEndian::write_u64(&mut dest[16..24], r2); - LittleEndian::write_u64(&mut dest[24..32], r3); -} - -/// Do a XX 256-bit hash and return result. -pub fn twox_256(data: &[u8]) -> [u8; 32] { - let mut r: [u8; 32] = [0; 32]; - twox_256_into(data, &mut r); - r -} - -/// Do a keccak 256-bit hash and return result. -pub fn keccak_256(data: &[u8]) -> [u8; 32] { - let mut keccak = Keccak::v256(); - keccak.update(data); - let mut output = [0u8; 32]; - keccak.finalize(&mut output); - output -} - -/// Do a keccak 512-bit hash and return result. -pub fn keccak_512(data: &[u8]) -> [u8; 64] { - let mut keccak = Keccak::v512(); - keccak.update(data); - let mut output = [0u8; 64]; - keccak.finalize(&mut output); - output -} - -/// Do a sha2 256-bit hash and return result. -pub fn sha2_256(data: &[u8]) -> [u8; 32] { - let mut hasher = Sha256::new(); - hasher.update(data); - let mut output = [0u8; 32]; - output.copy_from_slice(&hasher.finalize()); - output +pub use sp_core_hashing::*; + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn blake2b() { + assert_eq!(sp_core_hashing_proc_macro::blake2b_64!(b""), blake2_64(b"")[..]); + assert_eq!(sp_core_hashing_proc_macro::blake2b_256!(b"test"), blake2_256(b"test")[..]); + assert_eq!(sp_core_hashing_proc_macro::blake2b_512!(b""), blake2_512(b"")[..]); + } + + #[test] + fn keccak() { + assert_eq!(sp_core_hashing_proc_macro::keccak_256!(b"test"), keccak_256(b"test")[..]); + assert_eq!(sp_core_hashing_proc_macro::keccak_512!(b"test"), keccak_512(b"test")[..]); + } + + #[test] + fn sha2() { + assert_eq!(sp_core_hashing_proc_macro::sha2_256!(b"test"), sha2_256(b"test")[..]); + } + + #[test] + fn twox() { + assert_eq!(sp_core_hashing_proc_macro::twox_128!(b"test"), twox_128(b"test")[..]); + assert_eq!(sp_core_hashing_proc_macro::twox_64!(b""), twox_64(b"")[..]); + } + + #[test] + fn twox_concats() { + assert_eq!( + sp_core_hashing_proc_macro::twox_128!(b"test", b"123", b"45", b"", b"67890"), + super::twox_128(&b"test1234567890"[..]), + ); + assert_eq!( + sp_core_hashing_proc_macro::twox_128!(b"test", test, b"45", b"", b"67890"), + super::twox_128(&b"testtest4567890"[..]), + ); + } } From 9d8e5c468e252f0773c7bae60a631a139969b046 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Tue, 2 Nov 2021 15:09:40 +0100 Subject: [PATCH 036/162] Spelling (#10154) --- docs/STYLE_GUIDE.md | 4 ++-- frame/babe/src/lib.rs | 2 +- frame/multisig/src/lib.rs | 2 +- primitives/state-machine/src/lib.rs | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/STYLE_GUIDE.md b/docs/STYLE_GUIDE.md index ea070cdbc59f3..8854f885a4b22 100644 --- a/docs/STYLE_GUIDE.md +++ b/docs/STYLE_GUIDE.md @@ -140,8 +140,8 @@ let mut target_path = ``` - Unsafe code requires explicit proofs just as panickers do. When introducing unsafe code, - consider tradeoffs between efficiency on one hand and reliability, maintenance costs, and - security on the other. 
Here is a list of questions that may help evaluating the tradeoff while
+  consider trade-offs between efficiency on one hand and reliability, maintenance costs, and
+  security on the other. Here is a list of questions that may help evaluating the trade-off while
   preparing or reviewing a PR:
   - how much more performant or compact the resulting code will be using unsafe code,
   - how likely is it that invariants could be violated,
diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs
index 9c755eea6c446..c74bbf897ac7f 100644
--- a/frame/babe/src/lib.rs
+++ b/frame/babe/src/lib.rs
@@ -248,7 +248,7 @@ pub mod pallet {
 	/// Randomness under construction.
 	///
-	/// We make a tradeoff between storage accesses and list length.
+	/// We make a trade-off between storage accesses and list length.
 	/// We store the under-construction randomness in segments of up to
 	/// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`.
 	///
diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs
index 53567cc212afd..c38ddf1793ee1 100644
--- a/frame/multisig/src/lib.rs
+++ b/frame/multisig/src/lib.rs
@@ -218,7 +218,7 @@ pub mod pallet {
 			CallHash,
 			DispatchResult,
 		),
-		/// A multisig operation has been cancelled. \[cancelling, timepoint, multisig, call_hash\]
+		/// A multisig operation has been cancelled. \[canceling, timepoint, multisig, call_hash\]
 		MultisigCancelled(T::AccountId, Timepoint, T::AccountId, CallHash),
 	}
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs
index 7bd0c645f3c00..a724cf5c9a0b4 100644
--- a/primitives/state-machine/src/lib.rs
+++ b/primitives/state-machine/src/lib.rs
@@ -1496,7 +1496,7 @@ mod tests {
 		}
 
 		overlay.start_transaction();
-		// Then only initlaization item and second (commited) item should persist.
+		// Then only initialization item and second (committed) item should persist.
{ let ext = Ext::new( &mut overlay, From 635c187e8f92a239772ffd70d7a445e073f8920f Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 2 Nov 2021 15:20:00 +0100 Subject: [PATCH 037/162] Better error for when origin filter prevent the call to be dispatched (#10134) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * better error * Apply suggestions from code review Co-authored-by: Bastian Köcher * remove unused * fix error * fmt * fix tests * fmt * Update frame/contracts/src/exec.rs Co-authored-by: Alexander Theißen * fix typo Co-authored-by: Bastian Köcher Co-authored-by: Alexander Theißen --- frame/contracts/src/exec.rs | 12 +++--- frame/multisig/src/tests.rs | 2 +- frame/proxy/src/tests.rs | 42 ++++++++++++++----- .../src/construct_runtime/expand/call.rs | 6 ++- .../src/construct_runtime/expand/origin.rs | 13 +----- .../procedural/src/construct_runtime/mod.rs | 13 +++++- frame/support/test/tests/system.rs | 4 +- frame/system/src/lib.rs | 2 + frame/utility/src/tests.rs | 10 +++-- 9 files changed, 67 insertions(+), 37 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index c28490dfacccf..7ef1aec2dfc60 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1094,10 +1094,7 @@ mod tests { use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; use sp_core::Bytes; - use sp_runtime::{ - traits::{BadOrigin, Hash}, - DispatchError, - }; + use sp_runtime::{traits::Hash, DispatchError}; use std::{cell::RefCell, collections::HashMap, rc::Rc}; type System = frame_system::Pallet; @@ -2114,7 +2111,10 @@ mod tests { let forbidden_call = Call::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); // simple cases: direct call - assert_err!(ctx.ext.call_runtime(forbidden_call.clone()), BadOrigin); + assert_err!( + ctx.ext.call_runtime(forbidden_call.clone()), + frame_system::Error::::CallFiltered + ); // as part of a patch: return is OK (but it interrupted the batch) assert_ok!(ctx.ext.call_runtime(Call::Utility(UtilCall::batch { @@ -2159,7 +2159,7 @@ mod tests { phase: Phase::Initialization, event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted( 1, - BadOrigin.into() + frame_system::Error::::CallFiltered.into() ),), topics: vec![], }, diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index d46c22ec73d09..c5607c80abce4 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -846,7 +846,7 @@ fn multisig_filters() { let call = Box::new(Call::System(frame_system::Call::set_code { code: vec![] })); assert_noop!( Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), - DispatchError::BadOrigin, + DispatchError::from(frame_system::Error::::CallFiltered), ); }); } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index d319ebb1a5ab0..20efd085fe882 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -174,6 +174,8 @@ use frame_system::Call as SystemCall; use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; +type SystemError = frame_system::Error; + pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { @@ -333,7 +335,9 @@ fn filtering_works() { assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); 
System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); let derivative_id = Utility::derivative_account_id(1, 0); assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); @@ -344,9 +348,13 @@ fn filtering_works() { assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); @@ -355,10 +363,12 @@ fn filtering_works() { ProxyEvent::ProxyExecuted(Ok(())).into(), ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ - UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), + UtilityEvent::BatchInterrupted(0, SystemError::CallFiltered.into()).into(), ProxyEvent::ProxyExecuted(Ok(())).into(), ]); @@ -371,18 +381,24 @@ fn filtering_works() { ProxyEvent::ProxyExecuted(Ok(())).into(), ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ - UtilityEvent::BatchInterrupted(0, DispatchError::BadOrigin).into(), + UtilityEvent::BatchInterrupted(0, SystemError::CallFiltered.into()).into(), ProxyEvent::ProxyExecuted(Ok(())).into(), ]); let call = Box::new(Call::Proxy(ProxyCall::remove_proxies {})); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ BalancesEvent::::Unreserved(1, 5).into(), @@ -462,13 +478,17 @@ fn proxying_works() { let call = Box::new(Call::System(SystemCall::set_code { code: vec![] })); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, 
call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); let call = Box::new(Call::Balances(BalancesCall::transfer_keep_alive { dest: 6, value: 1 })); assert_ok!(Call::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) .dispatch(Origin::signed(2))); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); + System::assert_last_event( + ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); assert_eq!(Balances::free_balance(6), 2); diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 2532a680e21be..5658ec045433a 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -22,6 +22,7 @@ use syn::Ident; pub fn expand_outer_dispatch( runtime: &Ident, + system_pallet: &Pallet, pallet_decls: &[Pallet], scrate: &TokenStream, ) -> TokenStream { @@ -29,6 +30,7 @@ pub fn expand_outer_dispatch( let mut variant_patterns = Vec::new(); let mut query_call_part_macros = Vec::new(); let mut pallet_names = Vec::new(); + let system_path = &system_pallet.path; let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); @@ -106,7 +108,9 @@ pub fn expand_outer_dispatch( type PostInfo = #scrate::weights::PostDispatchInfo; fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { if !::filter_call(&origin, &self) { - return #scrate::sp_std::result::Result::Err(#scrate::dispatch::DispatchError::BadOrigin.into()); + return #scrate::sp_std::result::Result::Err( + #system_path::Error::<#runtime>::CallFiltered.into() + ); } #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 57adf86a9fe18..eb0212c3efee3 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,23 +18,14 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; -use syn::{token, Generics, Ident}; +use syn::{Generics, Ident}; pub fn expand_outer_origin( runtime: &Ident, + system_pallet: &Pallet, pallets: &[Pallet], - pallets_token: token::Brace, scrate: &TokenStream, ) -> syn::Result { - let system_pallet = - pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { - syn::Error::new( - pallets_token.span, - "`System` pallet declaration is missing. 
\ Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`",
-			)
-		})?;
-
 	let mut caller_variants = TokenStream::new();
 	let mut pallet_conversions = TokenStream::new();
 	let mut query_origin_part_macros = Vec::new();
diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs
index 4315d4278183a..f54fa79ce609b 100644
--- a/frame/support/procedural/src/construct_runtime/mod.rs
+++ b/frame/support/procedural/src/construct_runtime/mod.rs
@@ -214,17 +214,26 @@ fn construct_runtime_final_expansion(
 		pallets_token,
 	} = definition;
 
+	let system_pallet =
+		pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| {
+			syn::Error::new(
+				pallets_token.span,
+				"`System` pallet declaration is missing. \
+				 Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`",
+			)
+		})?;
+
 	let hidden_crate_name = "construct_runtime";
 	let scrate = generate_crate_access(&hidden_crate_name, "frame-support");
 	let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support");
 
 	let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?;
-	let outer_origin = expand::expand_outer_origin(&name, &pallets, pallets_token, &scrate)?;
+	let outer_origin = expand::expand_outer_origin(&name, &system_pallet, &pallets, &scrate)?;
 	let all_pallets = decl_all_pallets(&name, pallets.iter());
 	let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate);
 
-	let dispatch = expand::expand_outer_dispatch(&name, &pallets, &scrate);
+	let dispatch = expand::expand_outer_dispatch(&name, &system_pallet, &pallets, &scrate);
 	let metadata = expand::expand_runtime_metadata(&name, &pallets, &scrate, &unchecked_extrinsic);
 	let outer_config = expand::expand_outer_config(&name, &pallets, &scrate);
 	let inherent =
diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs
index 4acc248d25f20..9def12131dd19 100644
--- a/frame/support/test/tests/system.rs
+++ b/frame/support/test/tests/system.rs
@@ -63,7 +63,9 @@ frame_support::decl_error! {
 		TestError,
 		/// Error documentation
 		/// with multiple lines
-		AnotherError
+		AnotherError,
+		// Required by construct_runtime
+		CallFiltered,
 	}
 }
diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs
index 2e7f26eef16f4..41e1738c034f1 100644
--- a/frame/system/src/lib.rs
+++ b/frame/system/src/lib.rs
@@ -560,6 +560,8 @@ pub mod pallet {
 		NonDefaultComposite,
 		/// There is a non-zero reference count preventing the account from being purged.
 		NonZeroRefCount,
+		/// The origin filter prevents the call from being dispatched.
+		CallFiltered,
 	}
 
 	/// Exposed trait-generic origin type.
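To make the behavioural change easier to follow, here is a minimal sketch of the dispatch path this patch alters. It is illustrative only: `Runtime`, `Origin` and `Call` stand in for the types that `construct_runtime!` generates, and the authoritative expansion is the one in `expand/call.rs` above.

```rust
// Sketch (not part of the diff): what the generated outer `Dispatchable` impl
// now does when the origin's call filter rejects a call.
fn dispatch(call: Call, origin: Origin) -> DispatchResultWithPostInfo {
	if !origin.filter_call(&call) {
		// Previously this returned the generic `DispatchError::BadOrigin`;
		// rejected calls now surface as a dedicated, diagnosable error.
		return Err(frame_system::Error::<Runtime>::CallFiltered.into());
	}
	UnfilteredDispatchable::dispatch_bypass_filter(call, origin)
}
```

As the updated `pallet-proxy` and `pallet-utility` tests in this patch show, callers that previously matched on `DispatchError::BadOrigin` must now expect `frame_system::Error::<T>::CallFiltered` instead.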
diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index bbfbb417e23d1..cb7a3d9a21e29 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -288,7 +288,7 @@ fn as_derivative_filters() { value: 1 })), ), - DispatchError::BadOrigin + DispatchError::from(frame_system::Error::::CallFiltered), ); }); } @@ -338,7 +338,8 @@ fn batch_with_signed_filters() { vec![Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })] ),); System::assert_last_event( - utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), + utility::Event::BatchInterrupted(0, frame_system::Error::::CallFiltered.into()) + .into(), ); }); } @@ -573,7 +574,7 @@ fn batch_all_does_not_nest() { actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), pays_fee: Pays::Yes }, - error: DispatchError::BadOrigin, + error: frame_system::Error::::CallFiltered.into(), } ); @@ -585,7 +586,8 @@ fn batch_all_does_not_nest() { // and balances. assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); System::assert_has_event( - utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), + utility::Event::BatchInterrupted(0, frame_system::Error::::CallFiltered.into()) + .into(), ); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); From 079889e54ddd18c16e0ff90221fbf8b5b76e5275 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 2 Nov 2021 20:24:00 +0000 Subject: [PATCH 038/162] add missing version to dependencies (#10148) --- frame/bags-list/remote-tests/Cargo.toml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml index c670178c6188a..ee5b8c7c3f6e7 100644 --- a/frame/bags-list/remote-tests/Cargo.toml +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -14,21 +14,21 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # frame -pallet-staking = { path = "../../staking" } -pallet-bags-list = { path = "../../bags-list" } -frame-election-provider-support = { path = "../../election-provider-support" } -frame-system = { path = "../../system" } -frame-support = { path = "../../support" } +pallet-staking = { path = "../../staking", version = "4.0.0-dev" } +pallet-bags-list = { path = "../../bags-list", version = "4.0.0-dev" } +frame-election-provider-support = { path = "../../election-provider-support", version = "4.0.0-dev" } +frame-system = { path = "../../system", version = "4.0.0-dev" } +frame-support = { path = "../../support", version = "4.0.0-dev" } # core -sp-storage = { path = "../../../primitives/storage" } -sp-core = { path = "../../../primitives/core" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-std = { path = "../../../primitives/std" } +sp-storage = { path = "../../../primitives/storage", version = "4.0.0-dev" } +sp-core = { path = "../../../primitives/core", version = "4.0.0-dev" } +sp-tracing = { path = "../../../primitives/tracing", version = "4.0.0-dev" } +sp-runtime = { path = "../../../primitives/runtime", version = "4.0.0-dev" } +sp-std = { path = "../../../primitives/std", version = "4.0.0-dev" } # utils -remote-externalities = { path = "../../../utils/frame/remote-externalities" } +remote-externalities = { path = "../../../utils/frame/remote-externalities", version = "0.10.0-dev" } # others tokio = { version = "1", features = ["macros"] } From 
7e42d7788002eadd94d3ce7782986a3fc6924e19 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 2 Nov 2021 22:21:39 +0100 Subject: [PATCH 039/162] Bump parity-db (#10151) --- Cargo.lock | 4 ++-- client/db/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20499facb5b71..a6f808ac00f03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6194,9 +6194,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91b679c6acc14fac74382942e2b73bea441686a33430b951ea03b5aeb6a7f254" +checksum = "e7ccc4a8687027deb53d45c5434a1f1b330c9d1069a59cfe80a62aa9a1da25ae" dependencies = [ "blake2-rfc", "crc32fast", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 53af082d3b91d..165d01d555116 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,7 +33,7 @@ sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } -parity-db = { version = "0.3.2", optional = true } +parity-db = { version = "0.3.3", optional = true } [dev-dependencies] sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } From 85a2a7a2820c0d1032903ac5584eda81461bad30 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 3 Nov 2021 10:12:50 +0100 Subject: [PATCH 040/162] Add a `trie_root_hash` variant for chain specs genesis (#10140) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a `hash` variant for chain specs * Add doc * Rename to TrieRootHash * Apply suggestions from code review Co-authored-by: Bastian Köcher * Rustfmt * More cargo fmt I guess * Ok I have to use nightly cargo fmt Co-authored-by: Bastian Köcher --- client/chain-spec/src/chain_spec.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 2ddb56a0df845..4aa0aa74630e0 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -119,6 +119,10 @@ impl BuildStorage for ChainSpec { }) .collect(), }), + // The `StateRootHash` variant exists as a way to keep note that other clients support + // it, but Substrate itself isn't capable of loading chain specs with just a hash at the + // moment. + Genesis::StateRootHash(_) => Err("Genesis storage in hash format not supported".into()), } } @@ -144,6 +148,8 @@ pub struct RawGenesis { enum Genesis { Runtime(G), Raw(RawGenesis), + /// State root hash of the genesis storage. + StateRootHash(StorageData), } /// A configuration of a client. Does not include runtime storage initialization. From 26e5f5f526ced19dfe649010e8f19e2c3f957b12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Nov 2021 10:29:16 +0000 Subject: [PATCH 041/162] Bump prost-build from 0.8.0 to 0.9.0 (#10162) Bumps [prost-build](https://github.com/tokio-rs/prost) from 0.8.0 to 0.9.0. - [Release notes](https://github.com/tokio-rs/prost/releases) - [Commits](https://github.com/tokio-rs/prost/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: prost-build dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 121 ++++++++++++++++++++------ client/authority-discovery/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- 3 files changed, 97 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6f808ac00f03..39dedb31839dd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1886,6 +1886,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" +[[package]] +name = "fixedbitset" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" + [[package]] name = "flate2" version = "1.0.20" @@ -3393,8 +3399,8 @@ dependencies = [ "multistream-select", "parking_lot 0.11.1", "pin-project 1.0.8", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.7.3", "ring", "rw-stream-sink", @@ -3443,8 +3449,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.7.3", "smallvec 1.7.0", ] @@ -3465,8 +3471,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.7.3", "regex", "sha2 0.9.8", @@ -3485,8 +3491,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "smallvec 1.7.0", "wasm-timer", ] @@ -3506,8 +3512,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.7.3", "sha2 0.9.8", "smallvec 1.7.0", @@ -3568,8 +3574,8 @@ dependencies = [ "lazy_static", "libp2p-core", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.8.4", "sha2 0.9.8", "snow", @@ -3604,8 +3610,8 @@ dependencies = [ "futures 0.3.16", "libp2p-core", "log 0.4.14", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "unsigned-varint 0.7.0", "void", ] @@ -3638,8 +3644,8 @@ dependencies = [ "libp2p-swarm", "log 0.4.14", "pin-project 1.0.8", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.8.0", "rand 0.7.3", "smallvec 1.7.0", "unsigned-varint 0.7.0", @@ -6485,7 +6491,17 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" dependencies = [ - "fixedbitset", + "fixedbitset 0.2.0", + "indexmap", +] + +[[package]] +name = "petgraph" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" +dependencies = [ + "fixedbitset 0.4.0", "indexmap", ] @@ -6792,7 +6808,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ "bytes 1.0.1", - "prost-derive", + "prost-derive 0.8.0", +] + +[[package]] +name = "prost" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" +dependencies = [ + "bytes 1.0.1", + "prost-derive 0.9.0", ] [[package]] @@ -6806,9 +6832,29 @@ dependencies = [ "itertools", "log 0.4.14", "multimap", - "petgraph", - "prost", - "prost-types", + "petgraph 
0.5.1", + "prost 0.8.0", + "prost-types 0.8.0", + "tempfile", + "which", +] + +[[package]] +name = "prost-build" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" +dependencies = [ + "bytes 1.0.1", + "heck", + "itertools", + "lazy_static", + "log 0.4.14", + "multimap", + "petgraph 0.6.0", + "prost 0.9.0", + "prost-types 0.9.0", + "regex", "tempfile", "which", ] @@ -6826,6 +6872,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.8.0" @@ -6833,7 +6892,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ "bytes 1.0.1", - "prost", + "prost 0.8.0", +] + +[[package]] +name = "prost-types" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" +dependencies = [ + "bytes 1.0.1", + "prost 0.9.0", ] [[package]] @@ -7471,8 +7540,8 @@ dependencies = [ "libp2p", "log 0.4.14", "parity-scale-codec", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.9.0", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8138,8 +8207,8 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.8", - "prost", - "prost-build", + "prost 0.8.0", + "prost-build 0.9.0", "quickcheck", "rand 0.7.3", "sc-block-builder", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index cee35a43df2f6..dca365981984d 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.8" +prost-build = "0.9" [dependencies] async-trait = "0.1" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index d6d054504369b..7b4b30c593855 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.8" +prost-build = "0.9" [dependencies] async-trait = "0.1" From bdf97a350054b882dbe0c1cdcb377b3a9bf2e411 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 3 Nov 2021 13:23:32 +0100 Subject: [PATCH 042/162] Update CI image to the latest rustc (#10142) * Update CI image to the latest rustc * Update rustc error messages * whitespace * update test output * updating compiler errors * kid's finally learning to read and spots the TRYBUILD=overwrite message. 
* undoing a little blessing * imperfect error msg * revert ci image to production Co-authored-by: gilescope --- .../no_std_genesis_config.stderr | 7 +- .../undefined_call_part.stderr | 9 +- .../undefined_event_part.stderr | 9 +- .../undefined_genesis_config_part.stderr | 9 +- .../undefined_inherent_part.stderr | 9 +- .../undefined_origin_part.stderr | 9 +- .../undefined_validate_unsigned_part.stderr | 9 +- .../test/tests/derive_no_bound_ui/eq.stderr | 10 +- .../call_argument_invalid_bound.stderr | 2 +- .../call_argument_invalid_bound_2.stderr | 22 ++--- .../pallet_ui/event_field_not_member.stderr | 2 +- .../genesis_default_not_satisfied.stderr | 7 +- .../tests/pallet_ui/hooks_invalid_item.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 92 +++++++++---------- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 92 +++++++++---------- .../type_value_forgotten_where_clause.stderr | 42 +++++---- ...reference_in_impl_runtime_apis_call.stderr | 11 ++- 17 files changed, 186 insertions(+), 157 deletions(-) diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 3dc7fcda9f18a..e458265a07cab 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -82,7 +82,8 @@ error[E0277]: the trait bound `Runtime: frame_system::pallet::Config` is not sat 11 | impl test_pallet::Config for Runtime {} | ^^^^^^^^^^^^^^^^^^^ the trait `frame_system::pallet::Config` is not implemented for `Runtime` | - ::: $WORKSPACE/frame/support/test/pallet/src/lib.rs +note: required by a bound in `Config` + --> $DIR/lib.rs:30:20 | - | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `Config` +30 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index 2629cf4101923..c4e567102a892 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -66,8 +66,11 @@ help: consider importing one of these items error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_call_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 
20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_call_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index af69b79ed1a64..da972f6f4b2f4 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -104,8 +104,11 @@ help: consider importing one of these items error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_event_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_event_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index bfedb921bca44..8e40773b65736 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -84,8 +84,11 @@ help: consider importing this struct error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_genesis_config_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_genesis_config_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 50dde1108263b..ae461473c3b11 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -66,8 +66,11 @@ help: consider importing one of these items error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_inherent_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 
20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_inherent_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index b5f3ec4d381bc..dbdd9f869a2e3 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -104,8 +104,11 @@ help: consider importing one of these items error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_origin_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_origin_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index 12bdce67cf038..8126d2f9a3e0f 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -66,8 +66,11 @@ help: consider importing one of these items error[E0277]: the trait bound `Runtime: frame_system::Config` is not satisfied --> $DIR/undefined_validate_unsigned_part.rs:20:6 | -8 | pub trait Config: frame_system::Config {} - | -------------------- required by this bound in `pallet::Config` -... 
20 | impl pallet::Config for Runtime {} | ^^^^^^^^^^^^^^ the trait `frame_system::Config` is not implemented for `Runtime` + | +note: required by a bound in `pallet::Config` + --> $DIR/undefined_validate_unsigned_part.rs:8:20 + | +8 | pub trait Config: frame_system::Config {} + | ^^^^^^^^^^^^^^^^^^^^ required by this bound in `pallet::Config` diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index fce13d6f17f06..19fb3ac248b81 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -4,9 +4,9 @@ error[E0277]: can't compare `Foo` with `Foo` 6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | - ::: $RUST/core/src/cmp.rs - | - | pub trait Eq: PartialEq { - | --------------- required by this bound in `std::cmp::Eq` - | = help: the trait `PartialEq` is not implemented for `Foo` +note: required by a bound in `std::cmp::Eq` + --> $DIR/cmp.rs:272:15 + | +272 | pub trait Eq: PartialEq { + | ^^^^^^^^^^^^^^^ required by this bound in `std::cmp::Eq` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 3d1ea1adc9862..b7a8e40e69a14 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -29,4 +29,4 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar: WrapperTypeEncode` is not satisfied --> $DIR/call_argument_invalid_bound_2.rs:1:1 @@ -47,12 +47,12 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is 17 | | #[pallet::call] | |__________________^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | - ::: $CARGO/parity-scale-codec-2.3.1/src/codec.rs - | - | fn encode_to(&self, dest: &mut T) { - | ------ required by this bound in `encode_to` - | = note: required because of the requirements on the impl of `Encode` for `::Bar` +note: required by a bound in `encode_to` + --> $DIR/codec.rs:223:18 + | +223 | fn encode_to(&self, dest: &mut T) { + | ^^^^^^ required by this bound in `encode_to` = note: this error originates in the derive macro `frame_support::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied @@ -61,9 +61,9 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | - ::: $CARGO/parity-scale-codec-2.3.1/src/codec.rs - | - | fn decode(input: &mut I) -> Result; - | ----- required by this bound in `parity_scale_codec::Decode::decode` - | = note: required because of the requirements on the impl of `Decode` for `::Bar` +note: required by a bound in `parity_scale_codec::Decode::decode` + --> $DIR/codec.rs:284:15 + | +284 | fn decode(input: &mut I) -> Result; + | ^^^^^ required by this bound in `parity_scale_codec::Decode::decode` diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index bf4c05bb4e5b5..06384b0b66b84 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -19,7 +19,7 @@ error[E0369]: binary operation `==` cannot be applied to type `& { - | 
^^^^^^^^^^^^^^^^^^^^^ + | +++++++++++++++++++++ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` --> $DIR/event_field_not_member.rs:23:7 diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index ad8300b8d89b8..057611367fe5f 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -4,7 +4,8 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is 22 | impl GenesisBuild for GenesisConfig {} | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` | - ::: $WORKSPACE/frame/support/src/traits/hooks.rs +note: required by a bound in `GenesisBuild` + --> $DIR/hooks.rs:297:36 | - | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { - | ------- required by this bound in `GenesisBuild` +297 | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { + | ^^^^^^^ required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index ecb57bec37a7b..d1a89fbb850e9 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -12,4 +12,4 @@ note: trait defined here, with 1 generic parameter: `BlockNumber` help: add missing generic argument | 12 | impl Hooks for Pallet {} - | ^^^^^^^^^^^^^^^^^^ + | ~~~~~~~~~~~~~~~~~~ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index cd3032c49735a..82fd3ad884f90 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -1,3 +1,49 @@ +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: 
the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 | @@ -57,49 +103,3 @@ note: required by `build_metadata` | 113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr 
b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 3d03af836986a..eb1404fc62c38 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -1,3 +1,49 @@ +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 + | +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 | @@ -57,49 +103,3 @@ note: required by `build_metadata` | 113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` -note: required by `partial_storage_info` - --> $DIR/storage.rs:88:2 - | -88 | fn partial_storage_info() -> Vec; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr index 85d7342b253d4..6288dcd534b60 100644 --- a/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr +++ b/frame/support/test/tests/pallet_ui/type_value_forgotten_where_clause.stderr @@ -1,47 +1,53 @@ error[E0277]: the trait bound `::AccountId: From` is not satisfied --> $DIR/type_value_forgotten_where_clause.rs:24:34 | -7 | pub trait Config: frame_system::Config - | ------ required by a bound in this -8 | where ::AccountId: From - | --------- required by this bound in `pallet::Config` -... 24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^ the trait `From` is not implemented for `::AccountId` | +note: required by a bound in `pallet::Config` + --> $DIR/type_value_forgotten_where_clause.rs:8:51 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | ^^^^^^^^^ required by this bound in `pallet::Config` help: consider further restricting the associated type | 24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | +++++++++++++++++++++++++++++++++++++++++++++++++++++++ error[E0277]: the trait bound `::AccountId: From` is not satisfied --> $DIR/type_value_forgotten_where_clause.rs:24:12 | -7 | pub trait Config: frame_system::Config - | ------ required by a bound in this -8 | where ::AccountId: From - | --------- required by this bound in `pallet::Config` -... 
24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` | +note: required by a bound in `pallet::Config` + --> $DIR/type_value_forgotten_where_clause.rs:8:51 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | ^^^^^^^^^ required by this bound in `pallet::Config` help: consider further restricting the associated type | 24 | #[pallet::type_value where ::AccountId: From] fn Foo() -> u32 { 3u32 } - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | +++++++++++++++++++++++++++++++++++++++++++++++++++++++ error[E0277]: the trait bound `::AccountId: From` is not satisfied --> $DIR/type_value_forgotten_where_clause.rs:24:12 | -7 | pub trait Config: frame_system::Config - | ------ required by a bound in this -8 | where ::AccountId: From - | --------- required by this bound in `pallet::Config` -... 24 | #[pallet::type_value] fn Foo() -> u32 { 3u32 } | ^^^^^^^^^^ the trait `From` is not implemented for `::AccountId` | +note: required by a bound in `pallet::Config` + --> $DIR/type_value_forgotten_where_clause.rs:8:51 + | +7 | pub trait Config: frame_system::Config + | ------ required by a bound in this +8 | where ::AccountId: From + | ^^^^^^^^^ required by this bound in `pallet::Config` help: consider further restricting the associated type | 24 | #[pallet::type_value] fn Foo() -> u32 where ::AccountId: From { 3u32 } - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | +++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 063cbff60f81e..d11aebbf149bb 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -58,7 +58,10 @@ error[E0308]: mismatched types --> $DIR/type_reference_in_impl_runtime_apis_call.rs:19:11 | 19 | fn test(data: &u64) { - | ^^^^^^^ - | | - | expected `u64`, found `&u64` - | help: consider removing the borrow: `data` + | ^^^^^^^ expected `u64`, found `&u64` + | +help: consider removing the borrow + | +19 - fn test(data: &u64) { +19 + fn test(data: &u64) { + | From 45f0007c262b4e51f82d02f13aece1a0401028b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Nov 2021 19:52:41 +0000 Subject: [PATCH 043/162] Bump directories from 3.0.2 to 4.0.1 (#10163) Bumps [directories](https://github.com/soc/directories-rs) from 3.0.2 to 4.0.1. - [Release notes](https://github.com/soc/directories-rs/releases) - [Commits](https://github.com/soc/directories-rs/commits) --- updated-dependencies: - dependency-name: directories dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/service/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 39dedb31839dd..307274395cc3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1531,9 +1531,9 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.2" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ "dirs-sys", ] diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index c3ae1452042f4..bb8c9ecf6a0cb 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -80,7 +80,7 @@ parity-util-mem = { version = "0.10.0", default-features = false, features = [ async-trait = "0.1.50" tokio = { version = "1.10", features = ["time", "rt-multi-thread"] } tempfile = "3.1.0" -directories = "3.0.2" +directories = "4.0.1" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } From 4f3b9c318551ecd88d3af6074bc05a20b060fe1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Nov 2021 22:43:23 +0000 Subject: [PATCH 044/162] Bump linregress from 0.4.3 to 0.4.4 (#10124) Bumps [linregress](https://github.com/n1m3/linregress) from 0.4.3 to 0.4.4. - [Release notes](https://github.com/n1m3/linregress/releases) - [Changelog](https://github.com/n1m3/linregress/blob/master/CHANGELOG.md) - [Commits](https://github.com/n1m3/linregress/compare/0.4.3...0.4.4) --- updated-dependencies: - dependency-name: linregress dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- frame/benchmarking/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 307274395cc3c..7ade00bc40040 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3927,9 +3927,9 @@ dependencies = [ [[package]] name = "linregress" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6e407dadb4ca4b31bc69c27aff00e7ca4534fdcee855159b039a7cebb5f395" +checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8" dependencies = [ "nalgebra", "statrs", diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 0600b934d44e2..dffada428248a 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -linregress = { version = "0.4.3", optional = true } +linregress = { version = "0.4.4", optional = true } paste = "1.0" codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } From e40710abffde7dffcd30eb51756a7a1e9eb0e249 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Thu, 4 Nov 2021 11:11:39 +0100 Subject: [PATCH 045/162] handle doc on type_value (#10132) --- .../procedural/src/pallet/expand/type_value.rs | 3 +++ .../procedural/src/pallet/parse/type_value.rs | 18 +++++++++++++++--- frame/support/test/tests/pallet.rs | 1 + 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/type_value.rs b/frame/support/procedural/src/pallet/expand/type_value.rs index 535a187773807..af4f0b3a67947 100644 --- a/frame/support/procedural/src/pallet/expand/type_value.rs +++ b/frame/support/procedural/src/pallet/expand/type_value.rs @@ -59,7 +59,10 @@ pub fn expand_type_values(def: &mut Def) -> proc_macro2::TokenStream { (Default::default(), Default::default()) }; + let docs = &type_value.docs; + expand.extend(quote::quote_spanned!(type_value.attr_span => + #( #[doc = #docs] )* #vis struct #ident<#struct_use_gen>(core::marker::PhantomData<((), #struct_use_gen)>); impl<#struct_impl_gen> #frame_support::traits::Get<#type_> for #ident<#struct_use_gen> #where_clause diff --git a/frame/support/procedural/src/pallet/parse/type_value.rs b/frame/support/procedural/src/pallet/parse/type_value.rs index 7b9d57472db4b..d6cd4d02df8bd 100644 --- a/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/frame/support/procedural/src/pallet/parse/type_value.rs @@ -38,6 +38,8 @@ pub struct TypeValueDef { pub where_clause: Option, /// The span of the pallet::type_value attribute. pub attr_span: proc_macro2::Span, + /// Docs on the item. 
+	pub docs: Vec<syn::Lit>,
 }
 
 impl TypeValueDef {
@@ -53,9 +55,18 @@ impl TypeValueDef {
 			return Err(syn::Error::new(item.span(), msg))
 		};
 
-		if !item.attrs.is_empty() {
-			let msg = "Invalid pallet::type_value, unexpected attribute";
-			return Err(syn::Error::new(item.attrs[0].span(), msg))
+		let mut docs = vec![];
+		for attr in &item.attrs {
+			if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() {
+				if meta.path.get_ident().map_or(false, |ident| ident == "doc") {
+					docs.push(meta.lit);
+					continue
+				}
+			}
+
+			let msg = "Invalid pallet::type_value, unexpected attribute, only doc attribute are \
+				allowed";
+			return Err(syn::Error::new(attr.span(), msg))
 		}
 
 		if let Some(span) = item
@@ -106,6 +117,7 @@ impl TypeValueDef {
 			type_,
 			instances,
 			where_clause,
+			docs,
 		})
 	}
 }
diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs
index a314f576187dc..dd171c5b12ec2 100644
--- a/frame/support/test/tests/pallet.rs
+++ b/frame/support/test/tests/pallet.rs
@@ -262,6 +262,7 @@ pub mod pallet {
 	#[pallet::storage_prefix = "Value2"]
 	pub type RenamedValue<T> = StorageValue<Value = u64>;
 
+	/// Test some doc
 	#[pallet::type_value]
 	pub fn MyDefault<T: Config>() -> u16
 	where

From 6af19fdc47f16bee2901908e34b70d6d7ba80c59 Mon Sep 17 00:00:00 2001
From: Dan Shields <35669742+NukeManDan@users.noreply.github.com>
Date: Thu, 4 Nov 2021 14:37:26 +0100
Subject: [PATCH 046/162] Update node-template `construct_runtime!` syntax
 (#10155)

* update `construct_runtime!` syntax

* fix build
---
 bin/node-template/node/src/chain_spec.rs | 1 +
 bin/node-template/runtime/src/lib.rs | 18 +++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs
index 7009b3be5c279..baf5e5d41ab85 100644
--- a/bin/node-template/node/src/chain_spec.rs
+++ b/bin/node-template/node/src/chain_spec.rs
@@ -150,5 +150,6 @@ fn testnet_genesis(
 			// Assign network admin rights.
 			key: root_key,
 		},
+		transaction_payment: Default::default(),
 	}
 }
diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs
index dbea698002c60..8ecb2199dda71 100644
--- a/bin/node-template/runtime/src/lib.rs
+++ b/bin/node-template/runtime/src/lib.rs
@@ -286,16 +286,16 @@ construct_runtime!(
 		NodeBlock = opaque::Block,
 		UncheckedExtrinsic = UncheckedExtrinsic
 	{
-		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
-		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
-		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
-		Aura: pallet_aura::{Pallet, Config<T>},
-		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
-		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
-		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
-		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+		System: frame_system,
+		RandomnessCollectiveFlip: pallet_randomness_collective_flip,
+		Timestamp: pallet_timestamp,
+		Aura: pallet_aura,
+		Grandpa: pallet_grandpa,
+		Balances: pallet_balances,
+		TransactionPayment: pallet_transaction_payment,
+		Sudo: pallet_sudo,
 		// Include the custom logic from the pallet-template in the runtime.
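// Aside (illustration, not part of the patch): with the updated macro syntax,
// omitting the part list lets `construct_runtime!` derive the exposed parts
// from the `#[frame_support::pallet]` definition itself, so the shorthand
// `TemplateModule: pallet_template` below is equivalent to the old explicit
// `TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>}` form.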
-		TemplateModule: pallet_template::{Pallet, Call, Storage, Event<T>},
+		TemplateModule: pallet_template,
 	}
 );

From a9465729e2c5d2ef8d87ac404da27e5e10adde8a Mon Sep 17 00:00:00 2001
From: Xiliang Chen
Date: Fri, 5 Nov 2021 03:34:29 +1300
Subject: [PATCH 047/162] implement dispatch_as (#9934)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* implement dispatch_as

* fix

* fix

* weight for dispatch_as

* cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_utility --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/utility/src/weights.rs --template=./.maintain/frame-weight-template.hbs

* fix

* Update frame/utility/src/benchmarking.rs

Co-authored-by: Alexander Theißen

* fix issues

Co-authored-by: Parity Bot
Co-authored-by: Alexander Theißen
Co-authored-by: Shawn Tabrizi
---
 bin/node/runtime/src/lib.rs | 1 +
 frame/contracts/src/tests.rs | 1 +
 frame/proxy/src/tests.rs | 1 +
 frame/utility/src/benchmarking.rs | 9 ++++++++
 frame/utility/src/lib.rs | 38 +++++++++++++++++++++++++++++++
 frame/utility/src/tests.rs | 1 +
 frame/utility/src/weights.rs | 33 ++++++++++++++++-----------
 7 files changed, 71 insertions(+), 13 deletions(-)

diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index c0ad9bb006c92..570abe53ed01f 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -226,6 +226,7 @@ impl pallet_randomness_collective_flip::Config for Runtime {}
 impl pallet_utility::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
+	type PalletsOrigin = OriginCaller;
 	type WeightInfo = pallet_utility::weights::SubstrateWeight<Runtime>;
 }
 
diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs
index 310c1d4cb2dd9..bd5dbae5b34a6 100644
--- a/frame/contracts/src/tests.rs
+++ b/frame/contracts/src/tests.rs
@@ -244,6 +244,7 @@ impl pallet_timestamp::Config for Test {
 impl pallet_utility::Config for Test {
 	type Event = Event;
 	type Call = Call;
+	type PalletsOrigin = OriginCaller;
 	type WeightInfo = ();
 }
 parameter_types! {
diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs
index 20efd085fe882..93a0e4ce7d622 100644
--- a/frame/proxy/src/tests.rs
+++ b/frame/proxy/src/tests.rs
@@ -96,6 +96,7 @@ impl pallet_balances::Config for Test {
 impl pallet_utility::Config for Test {
 	type Event = Event;
 	type Call = Call;
+	type PalletsOrigin = OriginCaller;
 	type WeightInfo = ();
 }
 parameter_types! {
diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs
index 70cc61f87b9c9..ce59d7e898eb1 100644
--- a/frame/utility/src/benchmarking.rs
+++ b/frame/utility/src/benchmarking.rs
@@ -30,6 +30,7 @@ fn assert_last_event<T: Config>(generic_event: <T as Config>::Event) {
 }
 
 benchmarks! {
+	where_clause { where <T as Config>::PalletsOrigin: Clone }
 	batch {
 		let c in 0 .. 1000;
 		let mut calls: Vec<<T as Config>::Call> = Vec::new();
@@ -64,5 +65,13 @@ benchmarks! {
 		assert_last_event::<T>(Event::BatchCompleted.into())
 	}
 
+	dispatch_as {
+		let caller = account("caller", SEED, SEED);
+		let call = Box::new(frame_system::Call::remark { remark: vec![] }.into());
+		let origin: T::Origin = RawOrigin::Signed(caller).into();
+		let pallets_origin: <T::Origin as frame_support::traits::OriginTrait>::PalletsOrigin = origin.caller().clone();
+		let pallets_origin = Into::<T::PalletsOrigin>::into(pallets_origin.clone());
+	}: _(RawOrigin::Root, Box::new(pallets_origin), call)
+
 	impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test);
 }
diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs
index 8712cf74f451b..241526cef2230 100644
--- a/frame/utility/src/lib.rs
+++ b/frame/utility/src/lib.rs
@@ -96,6 +96,11 @@ pub mod pallet {
 			+ IsSubType<Call<Self>>
 			+ IsType<<Self as frame_system::Config>::Call>;
 
+		/// The caller origin, overarching type of all pallets origins.
+		type PalletsOrigin: Parameter
+			+ Into<<Self as frame_system::Config>::Origin>
+			+ IsType<<<Self as frame_system::Config>::Origin as frame_support::traits::OriginTrait>::PalletsOrigin>;
+
 		/// Weight information for extrinsics in this pallet.
 		type WeightInfo: WeightInfo;
 	}
@@ -110,6 +115,8 @@ pub mod pallet {
 		BatchCompleted,
 		/// A single item within a Batch of dispatches has completed with no error.
 		ItemCompleted,
+		/// A call was dispatched. \[result\]
+		DispatchedAs(DispatchResult),
 	}
 
 	// Align the call size to 1KB. As we are currently compiling the runtime for native/wasm
@@ -342,6 +349,37 @@ pub mod pallet {
 			let base_weight = T::WeightInfo::batch_all(calls_len as u32);
 			Ok(Some(base_weight + weight).into())
 		}
+
+		/// Dispatches a function call with a provided origin.
+		///
+		/// The dispatch origin for this call must be _Root_.
+		///
+		/// # <weight>
+		/// - O(1).
+		/// - Limited storage reads.
+		/// - One DB write (event).
+		/// - Weight of derivative `call` execution + T::WeightInfo::dispatch_as().
+		/// # </weight>
+		#[pallet::weight({
+			let dispatch_info = call.get_dispatch_info();
+			(
+				T::WeightInfo::dispatch_as()
+					.saturating_add(dispatch_info.weight),
+				dispatch_info.class,
+			)
+		})]
+		pub fn dispatch_as(
+			origin: OriginFor<T>,
+			as_origin: Box<T::PalletsOrigin>,
+			call: Box<<T as Config>::Call>,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+
+			let res = call.dispatch_bypass_filter((*as_origin).into());
+
+			Self::deposit_event(Event::DispatchedAs(res.map(|_| ()).map_err(|e| e.error)));
+			Ok(())
+		}
 	}
 }
diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs
index cb7a3d9a21e29..f4d09a30ec078 100644
--- a/frame/utility/src/tests.rs
+++ b/frame/utility/src/tests.rs
@@ -168,6 +168,7 @@ impl Contains<Call> for TestBaseCallFilter {
 impl Config for Test {
 	type Event = Event;
 	type Call = Call;
+	type PalletsOrigin = OriginCaller;
 	type WeightInfo = ();
 }
 
diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs
index 6ac23419e3ef7..bce18271684bc 100644
--- a/frame/utility/src/weights.rs
+++ b/frame/utility/src/weights.rs
@@ -18,7 +18,7 @@
 //! Autogenerated weights for pallet_utility
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2021-10-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128
 
 // Executed Command:
@@ -48,39 +48,46 @@ pub trait WeightInfo {
 	fn batch(c: u32, ) -> Weight;
 	fn as_derivative() -> Weight;
 	fn batch_all(c: u32, ) -> Weight;
+	fn dispatch_as() -> Weight;
 }
 
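// Aside (illustration, not part of the patch): a minimal usage sketch of the
// new `dispatch_as` extrinsic, assuming a test runtime whose `PalletsOrigin`
// is `OriginCaller` and a hypothetical signed account id `1`:
//
//     let call = Box::new(Call::System(frame_system::Call::remark { remark: vec![] }));
//     assert_ok!(Utility::dispatch_as(
//         Origin::root(),
//         Box::new(OriginCaller::system(frame_system::RawOrigin::Signed(1))),
//         call,
//     ));
//
// On success this deposits `Event::DispatchedAs(Ok(()))`; if the inner call
// fails, its error is reported inside the event rather than failing
// `dispatch_as` itself.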
 /// Weights for pallet_utility using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	fn batch(c: u32, ) -> Weight {
-		(30_319_000 as Weight)
+		(18_293_000 as Weight)
 			// Standard Error: 3_000
-			.saturating_add((6_759_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add((5_530_000 as Weight).saturating_mul(c as Weight))
 	}
 	fn as_derivative() -> Weight {
-		(4_030_000 as Weight)
+		(3_387_000 as Weight)
 	}
 	fn batch_all(c: u32, ) -> Weight {
-		(26_621_000 as Weight)
-			// Standard Error: 3_000
-			.saturating_add((7_251_000 as Weight).saturating_mul(c as Weight))
+		(19_223_000 as Weight)
+			// Standard Error: 4_000
+			.saturating_add((5_998_000 as Weight).saturating_mul(c as Weight))
+	}
+	fn dispatch_as() -> Weight {
+		(14_340_000 as Weight)
 	}
 }
 
 // For backwards compatibility and tests
 impl WeightInfo for () {
 	fn batch(c: u32, ) -> Weight {
-		(30_319_000 as Weight)
+		(18_293_000 as Weight)
 			// Standard Error: 3_000
-			.saturating_add((6_759_000 as Weight).saturating_mul(c as Weight))
+			.saturating_add((5_530_000 as Weight).saturating_mul(c as Weight))
 	}
 	fn as_derivative() -> Weight {
-		(4_030_000 as Weight)
+		(3_387_000 as Weight)
 	}
 	fn batch_all(c: u32, ) -> Weight {
-		(26_621_000 as Weight)
-			// Standard Error: 3_000
-			.saturating_add((7_251_000 as Weight).saturating_mul(c as Weight))
+		(19_223_000 as Weight)
+			// Standard Error: 4_000
+			.saturating_add((5_998_000 as Weight).saturating_mul(c as Weight))
+	}
+	fn dispatch_as() -> Weight {
+		(14_340_000 as Weight)
 	}
 }

From b89bb4c969895738ee7d76dca8811e12317a7f52 Mon Sep 17 00:00:00 2001
From: Dan Shields <35669742+NukeManDan@users.noreply.github.com>
Date: Thu, 4 Nov 2021 19:54:31 +0100
Subject: [PATCH 048/162] rm broken rustdoc-header.html (#10091)

---
 .gitlab-ci.yml | 3 +--
 .maintain/rustdoc-header.html | 10 ----------
 2 files changed, 1 insertion(+), 12 deletions(-)
 delete mode 100644 .maintain/rustdoc-header.html

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 87efc43af2898..5e406b36c5c5b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -661,8 +661,7 @@ build-rustdoc:
     - ./crate-docs/
   script:
     # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"`
-    - RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html"
-      time cargo +nightly doc --no-deps --workspace --all-features --verbose
+    - time cargo +nightly doc --no-deps --workspace --all-features --verbose
    - rm -f ./target/doc/.lock
    - mv ./target/doc ./crate-docs
    # FIXME: remove me after CI image gets nonroot
diff --git a/.maintain/rustdoc-header.html b/.maintain/rustdoc-header.html
deleted file mode 100644
index a679d5e299da7..0000000000000
--- a/.maintain/rustdoc-header.html
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-
-

From 75458291756cad3b51346cd098a3dfcd53b5d475 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Thu, 4 Nov 2021 19:54:48 +0100
Subject: [PATCH 049/162] Switch to Rust 2021 (#10170)

* Switch to Rust 2021

* Update trybuild to fix errors
---
 .maintain/node-template-release/Cargo.toml | 2 +-
 Cargo.lock | 4 ++--
 bin/node-template/node/Cargo.toml | 2 +-
 bin/node-template/runtime/Cargo.toml | 2 +-
 bin/node/bench/Cargo.toml | 2 +-
 bin/node/cli/Cargo.toml | 2 +-
 bin/node/executor/Cargo.toml | 2 +-
 bin/node/inspect/Cargo.toml | 2 +-
 bin/node/primitives/Cargo.toml | 2 +-
 bin/node/rpc-client/Cargo.toml | 2 +-
 bin/node/rpc/Cargo.toml | 2 +-
 bin/node/runtime/Cargo.toml | 2 +-
 bin/node/test-runner-example/Cargo.toml | 2 +-
 bin/node/testing/Cargo.toml | 2 +-
 bin/utils/chain-spec-builder/Cargo.toml | 2 +-
 bin/utils/subkey/Cargo.toml | 2 +-
 client/allocator/Cargo.toml | 2 +-
client/api/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/basic-authorship/Cargo.toml | 2 +- client/beefy/Cargo.toml | 2 +- client/beefy/rpc/Cargo.toml | 2 +- client/block-builder/Cargo.toml | 2 +- client/chain-spec/Cargo.toml | 2 +- client/chain-spec/derive/Cargo.toml | 2 +- client/chain-spec/derive/src/impls.rs | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/consensus/common/Cargo.toml | 2 +- client/consensus/epochs/Cargo.toml | 2 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/consensus/slots/Cargo.toml | 2 +- client/consensus/uncles/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/executor/common/Cargo.toml | 2 +- client/executor/runtime-test/Cargo.toml | 2 +- client/executor/src/wasm_runtime.rs | 1 - client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmtime/Cargo.toml | 2 +- client/finality-grandpa-warp-sync/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/finality-grandpa/rpc/Cargo.toml | 2 +- client/finality-grandpa/rpc/src/report.rs | 4 --- client/informant/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/light/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/network/src/protocol/sync.rs | 1 - .../src/protocol/sync/extra_requests.rs | 1 - client/network/test/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/proposer-metrics/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/rpc/src/chain/mod.rs | 2 -- client/service/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/sync-state-rpc/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- client/tracing/Cargo.toml | 2 +- client/tracing/proc-macro/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- client/transaction-pool/api/Cargo.toml | 2 +- client/utils/Cargo.toml | 2 +- frame/assets/Cargo.toml | 2 +- frame/atomic-swap/Cargo.toml | 2 +- frame/aura/Cargo.toml | 2 +- frame/authority-discovery/Cargo.toml | 2 +- frame/authorship/Cargo.toml | 2 +- frame/babe/Cargo.toml | 2 +- frame/bags-list/Cargo.toml | 2 +- frame/bags-list/fuzzer/Cargo.toml | 2 +- frame/bags-list/remote-tests/Cargo.toml | 2 +- frame/balances/Cargo.toml | 2 +- frame/beefy-mmr/Cargo.toml | 2 +- frame/beefy-mmr/primitives/Cargo.toml | 2 +- frame/beefy/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 2 +- frame/bounties/Cargo.toml | 2 +- frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/contracts/common/Cargo.toml | 2 +- frame/contracts/proc-macro/Cargo.toml | 2 +- frame/contracts/rpc/Cargo.toml | 2 +- frame/contracts/rpc/runtime-api/Cargo.toml | 2 +- frame/democracy/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 2 +- frame/election-provider-support/Cargo.toml | 2 +- frame/elections-phragmen/Cargo.toml | 2 +- frame/elections/Cargo.toml | 2 +- frame/example-offchain-worker/Cargo.toml | 2 +- frame/example-parallel/Cargo.toml | 2 +- frame/example-parallel/src/lib.rs | 1 - frame/example/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- frame/gilt/Cargo.toml | 2 +- frame/grandpa/Cargo.toml | 2 +- frame/identity/Cargo.toml | 2 +- frame/im-online/Cargo.toml | 2 +- frame/indices/Cargo.toml | 2 +- frame/lottery/Cargo.toml | 2 +- frame/membership/Cargo.toml | 2 +- 
 frame/merkle-mountain-range/Cargo.toml | 2 +-
 .../primitives/Cargo.toml | 2 +-
 frame/merkle-mountain-range/rpc/Cargo.toml | 2 +-
 frame/multisig/Cargo.toml | 2 +-
 frame/nicks/Cargo.toml | 2 +-
 frame/node-authorization/Cargo.toml | 2 +-
 frame/offences/Cargo.toml | 2 +-
 frame/offences/benchmarking/Cargo.toml | 2 +-
 frame/proxy/Cargo.toml | 2 +-
 frame/randomness-collective-flip/Cargo.toml | 2 +-
 frame/recovery/Cargo.toml | 2 +-
 frame/scheduler/Cargo.toml | 2 +-
 frame/scored-pool/Cargo.toml | 2 +-
 frame/session/Cargo.toml | 2 +-
 frame/session/benchmarking/Cargo.toml | 2 +-
 frame/society/Cargo.toml | 2 +-
 frame/staking/Cargo.toml | 2 +-
 frame/staking/reward-curve/Cargo.toml | 2 +-
 frame/staking/reward-fn/Cargo.toml | 2 +-
 frame/staking/src/pallet/impls.rs | 3 --
 frame/sudo/Cargo.toml | 2 +-
 frame/support/Cargo.toml | 2 +-
 frame/support/procedural/Cargo.toml | 2 +-
 .../procedural/src/pallet/expand/constants.rs | 2 +-
 .../procedural/src/pallet/expand/event.rs | 4 +--
 .../src/storage/genesis_config/mod.rs | 8 ++---
 .../support/procedural/src/storage/getters.rs | 2 +-
 .../procedural/src/storage/metadata.rs | 2 +-
 .../procedural/src/storage/storage_info.rs | 2 +-
 .../procedural/src/storage/store_trait.rs | 2 +-
 frame/support/procedural/tools/Cargo.toml | 2 +-
 .../procedural/tools/derive/Cargo.toml | 2 +-
 .../procedural/tools/derive/src/lib.rs | 2 +-
 frame/support/src/traits/members.rs | 2 +-
 frame/support/test/Cargo.toml | 8 +++--
 frame/support/test/compile_pass/Cargo.toml | 2 +-
 frame/support/test/pallet/Cargo.toml | 2 +-
 frame/support/test/tests/pallet.rs | 34 +++++++++----------
 frame/system/Cargo.toml | 2 +-
 frame/system/benchmarking/Cargo.toml | 2 +-
 frame/system/rpc/runtime-api/Cargo.toml | 2 +-
 frame/timestamp/Cargo.toml | 2 +-
 frame/tips/Cargo.toml | 2 +-
 frame/transaction-payment/Cargo.toml | 2 +-
 frame/transaction-payment/rpc/Cargo.toml | 2 +-
 .../rpc/runtime-api/Cargo.toml | 2 +-
 frame/transaction-storage/Cargo.toml | 2 +-
 frame/treasury/Cargo.toml | 2 +-
 frame/try-runtime/Cargo.toml | 2 +-
 frame/uniques/Cargo.toml | 2 +-
 frame/utility/Cargo.toml | 2 +-
 frame/vesting/Cargo.toml | 2 +-
 primitives/api/Cargo.toml | 2 +-
 primitives/api/proc-macro/Cargo.toml | 2 +-
 .../api/proc-macro/src/decl_runtime_apis.rs | 2 +-
 primitives/api/src/lib.rs | 2 --
 primitives/api/test/Cargo.toml | 4 +--
 primitives/application-crypto/Cargo.toml | 2 +-
 primitives/application-crypto/test/Cargo.toml | 2 +-
 primitives/arithmetic/Cargo.toml | 2 +-
 primitives/arithmetic/fuzzer/Cargo.toml | 2 +-
 primitives/authority-discovery/Cargo.toml | 2 +-
 primitives/authorship/Cargo.toml | 2 +-
 primitives/beefy/Cargo.toml | 2 +-
 primitives/block-builder/Cargo.toml | 2 +-
 primitives/blockchain/Cargo.toml | 2 +-
 primitives/consensus/aura/Cargo.toml | 2 +-
 primitives/consensus/babe/Cargo.toml | 2 +-
 primitives/consensus/common/Cargo.toml | 2 +-
 primitives/consensus/pow/Cargo.toml | 2 +-
 primitives/consensus/slots/Cargo.toml | 2 +-
 primitives/consensus/vrf/Cargo.toml | 2 +-
 primitives/core/Cargo.toml | 2 +-
 primitives/core/hashing/Cargo.toml | 2 +-
 primitives/core/hashing/proc-macro/Cargo.toml | 2 +-
 primitives/database/Cargo.toml | 2 +-
 primitives/debug-derive/Cargo.toml | 2 +-
 primitives/externalities/Cargo.toml | 2 +-
 primitives/finality-grandpa/Cargo.toml | 2 +-
 primitives/finality-grandpa/src/lib.rs | 1 -
 primitives/inherents/Cargo.toml | 2 +-
 primitives/io/Cargo.toml | 2 +-
 primitives/keyring/Cargo.toml | 2 +-
 primitives/keystore/Cargo.toml | 2 +-
 primitives/maybe-compressed-blob/Cargo.toml | 2 +-
 primitives/npos-elections/Cargo.toml | 2 +-
 primitives/npos-elections/fuzzer/Cargo.toml | 2 +-
 .../npos-elections/solution-type/Cargo.toml | 4 +--
 primitives/offchain/Cargo.toml | 2 +-
 primitives/panic-handler/Cargo.toml | 2 +-
 primitives/rpc/Cargo.toml | 2 +-
 primitives/runtime-interface/Cargo.toml | 4 +--
 .../runtime-interface/proc-macro/Cargo.toml | 2 +-
 .../test-wasm-deprecated/Cargo.toml | 2 +-
 .../runtime-interface/test-wasm/Cargo.toml | 2 +-
 primitives/runtime-interface/test/Cargo.toml | 2 +-
 primitives/runtime/Cargo.toml | 2 +-
 primitives/sandbox/Cargo.toml | 2 +-
 primitives/serializer/Cargo.toml | 2 +-
 primitives/session/Cargo.toml | 2 +-
 primitives/staking/Cargo.toml | 2 +-
 primitives/state-machine/Cargo.toml | 2 +-
 primitives/std/Cargo.toml | 2 +-
 primitives/storage/Cargo.toml | 2 +-
 primitives/tasks/Cargo.toml | 2 +-
 primitives/test-primitives/Cargo.toml | 2 +-
 primitives/timestamp/Cargo.toml | 2 +-
 primitives/tracing/Cargo.toml | 2 +-
 primitives/transaction-pool/Cargo.toml | 2 +-
 .../transaction-storage-proof/Cargo.toml | 2 +-
 primitives/trie/Cargo.toml | 2 +-
 primitives/version/Cargo.toml | 2 +-
 primitives/version/proc-macro/Cargo.toml | 2 +-
 primitives/wasm-interface/Cargo.toml | 2 +-
 test-utils/Cargo.toml | 4 +--
 test-utils/client/Cargo.toml | 2 +-
 test-utils/derive/Cargo.toml | 2 +-
 test-utils/runtime/Cargo.toml | 2 +-
 test-utils/runtime/client/Cargo.toml | 2 +-
 .../runtime/transaction-pool/Cargo.toml | 2 +-
 test-utils/test-crate/Cargo.toml | 2 +-
 test-utils/test-runner/Cargo.toml | 2 +-
 utils/build-script-utils/Cargo.toml | 2 +-
 utils/fork-tree/Cargo.toml | 2 +-
 utils/frame/benchmarking-cli/Cargo.toml | 2 +-
 utils/frame/frame-utilities-cli/Cargo.toml | 2 +-
 utils/frame/generate-bags/Cargo.toml | 2 +-
 .../generate-bags/node-runtime/Cargo.toml | 2 +-
 utils/frame/generate-bags/src/lib.rs | 1 -
 utils/frame/remote-externalities/Cargo.toml | 2 +-
 utils/frame/rpc/support/Cargo.toml | 2 +-
 utils/frame/rpc/system/Cargo.toml | 2 +-
 utils/frame/try-runtime/cli/Cargo.toml | 2 +-
 utils/prometheus/Cargo.toml | 2 +-
 utils/wasm-builder/Cargo.toml | 2 +-
 245 files changed, 264 insertions(+), 279 deletions(-)

diff --git a/.maintain/node-template-release/Cargo.toml b/.maintain/node-template-release/Cargo.toml
index c1d9f2da7faea..26256e8363301 100644
--- a/.maintain/node-template-release/Cargo.toml
+++ b/.maintain/node-template-release/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-template-release"
 version = "3.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0"
 
 [dependencies]
diff --git a/Cargo.lock b/Cargo.lock
index 7ade00bc40040..4a50fe3565715 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10944,9 +10944,9 @@ dependencies = [
 
 [[package]]
 name = "trybuild"
-version = "1.0.43"
+version = "1.0.52"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c02c413315329fc96167f922b46fd0caa3a43f4697b7a7896b183c7142635832"
+checksum = "150e726dc059e6fbd4fce3288f5bb3cf70128cf63b0dde23b938a3cad810fb23"
 dependencies = [
  "dissimilar",
  "glob",
diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml
index 57d9c5f3f71e0..1d8abad406a14 100644
--- a/bin/node-template/node/Cargo.toml
+++ b/bin/node-template/node/Cargo.toml
@@ -4,7 +4,7 @@ version = "3.0.0"
 description = "A fresh FRAME-based Substrate node, ready for hacking."
 authors = ["Substrate DevHub <https://github.com/substrate-developer-hub>"]
 homepage = "https://substrate.io/"
-edition = "2018"
+edition = "2021"
 license = "Unlicense"
 publish = false
 repository = "https://github.com/substrate-developer-hub/substrate-node-template/"
diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml
index e9f557f3fb5a4..e4e2039866b46 100644
--- a/bin/node-template/runtime/Cargo.toml
+++ b/bin/node-template/runtime/Cargo.toml
@@ -4,7 +4,7 @@ version = "3.0.0"
 description = 'A fresh FRAME-based Substrate runtime, ready for hacking.'
 authors = ["Substrate DevHub <https://github.com/substrate-developer-hub>"]
 homepage = "https://substrate.io/"
-edition = "2018"
+edition = "2021"
 license = "Unlicense"
 publish = false
 repository = "https://github.com/substrate-developer-hub/substrate-node-template/"
diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml
index b19a71966fb87..f59ea2361186c 100644
--- a/bin/node/bench/Cargo.toml
+++ b/bin/node/bench/Cargo.toml
@@ -3,7 +3,7 @@ name = "node-bench"
 version = "0.9.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Substrate node integration benchmarks."
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml
index 1d394dd952db0..38c161a81ef06 100644
--- a/bin/node/cli/Cargo.toml
+++ b/bin/node/cli/Cargo.toml
@@ -4,7 +4,7 @@ version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Generic Substrate node implementation in Rust."
 build = "build.rs"
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 default-run = "substrate"
 homepage = "https://substrate.io"
diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml
index 462bb034610e9..21785079c6c54 100644
--- a/bin/node/executor/Cargo.toml
+++ b/bin/node/executor/Cargo.toml
@@ -3,7 +3,7 @@ name = "node-executor"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Substrate node implementation in Rust."
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml
index 6f526b896ba76..6504f39a05822 100644
--- a/bin/node/inspect/Cargo.toml
+++ b/bin/node/inspect/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-inspect"
 version = "0.9.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml
index 07c3aca6059f6..2c8185d5c9c47 100644
--- a/bin/node/primitives/Cargo.toml
+++ b/bin/node/primitives/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-primitives"
 version = "2.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml
index 59695edb6fa26..5c822ef3ad31a 100644
--- a/bin/node/rpc-client/Cargo.toml
+++ b/bin/node/rpc-client/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-rpc-client"
 version = "2.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml
index 4c7b15459cea9..ef111e3f1949c 100644
--- a/bin/node/rpc/Cargo.toml
+++ b/bin/node/rpc/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-rpc"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml
index 22ff0954e2458..c0b888e55b1f6 100644
--- a/bin/node/runtime/Cargo.toml
+++ b/bin/node/runtime/Cargo.toml
@@ -2,7 +2,7 @@
 name = "node-runtime"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 build = "build.rs"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml
index 96c4c2047ac4f..b664cdb8e50e2 100644
--- a/bin/node/test-runner-example/Cargo.toml
+++ b/bin/node/test-runner-example/Cargo.toml
@@ -2,7 +2,7 @@
 name = "test-runner-example"
 version = "0.1.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 publish = false
 
 [dependencies]
diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml
index e5d13189ac2fe..1854029b0709e 100644
--- a/bin/node/testing/Cargo.toml
+++ b/bin/node/testing/Cargo.toml
@@ -3,7 +3,7 @@ name = "node-testing"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
 description = "Test utilities for Substrate node."
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml
index bfbf2da57a6a0..a35fbba5cdc46 100644
--- a/bin/utils/chain-spec-builder/Cargo.toml
+++ b/bin/utils/chain-spec-builder/Cargo.toml
@@ -2,7 +2,7 @@
 name = "chain-spec-builder"
 version = "2.0.0"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 build = "build.rs"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml
index 5e7615f60d628..14ba673b33be2 100644
--- a/bin/utils/subkey/Cargo.toml
+++ b/bin/utils/subkey/Cargo.toml
@@ -2,7 +2,7 @@
 name = "subkey"
 version = "2.0.1"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml
index 9383b88f756cb..2b37c192c6e3e 100644
--- a/client/allocator/Cargo.toml
+++ b/client/allocator/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-allocator"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml
index af8704058b660..7b2952552a3d0 100644
--- a/client/api/Cargo.toml
+++ b/client/api/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-client-api"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml
index dca365981984d..dc4b929756810 100644
--- a/client/authority-discovery/Cargo.toml
+++ b/client/authority-discovery/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-authority-discovery"
 version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 build = "build.rs"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml
index 1ecdb08eba489..96ab698f36213 100644
--- a/client/basic-authorship/Cargo.toml
+++ b/client/basic-authorship/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-basic-authorship"
 version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml
index d4541288a6287..37dc4491e21d4 100644
--- a/client/beefy/Cargo.toml
+++ b/client/beefy/Cargo.toml
@@ -2,7 +2,7 @@
 name = "beefy-gadget"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml
index 8af2fa3eac867..47fd2b740370c 100644
--- a/client/beefy/rpc/Cargo.toml
+++ b/client/beefy/rpc/Cargo.toml
@@ -2,7 +2,7 @@
 name = "beefy-gadget-rpc"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml
index 9d2703fc2ed2a..25950a9e9061f 100644
--- a/client/block-builder/Cargo.toml
+++ b/client/block-builder/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-block-builder"
 version = "0.10.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml
index ba9655261923b..d7557e2062ac4 100644
--- a/client/chain-spec/Cargo.toml
+++ b/client/chain-spec/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-chain-spec"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml
index 73083e4a6e0ba..386137d0667ad 100644
--- a/client/chain-spec/derive/Cargo.toml
+++ b/client/chain-spec/derive/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-chain-spec-derive"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs
index 8c56430e81d02..87a3db063c782 100644
--- a/client/chain-spec/derive/src/impls.rs
+++ b/client/chain-spec/derive/src/impls.rs
@@ -52,7 +52,7 @@ pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream {
                 use std::any::{Any, TypeId};
 
                 match TypeId::of::<T>() {
-                    #( x if x == TypeId::of::<#field_types>() => Any::downcast_ref(&self.#field_names) ),*,
+                    #( x if x == TypeId::of::<#field_types>() => <dyn Any>::downcast_ref(&self.#field_names) ),*,
                     _ => None,
                 }
             }
-edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 1c767319b1229..62d7b3d5327ad 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-aura" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 66ad6287f40df..8430ca39c9cb7 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-babe" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 7ef9b1c1de3c4..bc57092d34001 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-babe-rpc" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 30840a974f9aa..d63d124ed60e5 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-consensus" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 90a708d07e2a2..4cac4a24d1879 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-epochs" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 9b29bb3dc71c7..a662ebf01011d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-manual-seal" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 
c570c1ccbad27..bc7c01181e41b 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-pow" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 6c1f865c62cb0..5c5f1bdfa68cd 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-slots" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" -edition = "2018" +edition = "2021" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 73768f0d09411..f644d64c7bbeb 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-consensus-uncles" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 165d01d555116..19bf7ad248fe5 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-client-db" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index bc55172bc33ef..028854992a094 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-executor" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 622baa6c0dcab..fc7d5a1528470 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-executor-common" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index ac1e3413491d9..1e3b5e926b964 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-runtime-test" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" publish = false diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 204a095717fc4..88c033814ed6a 100644 --- a/client/executor/src/wasm_runtime.rs +++ 
b/client/executor/src/wasm_runtime.rs @@ -356,7 +356,6 @@ fn decode_version(mut version: &[u8]) -> Result { fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { use sp_api::RUNTIME_API_INFO_SIZE; - use std::convert::TryFrom; apis.chunks(RUNTIME_API_INFO_SIZE) .map(|chunk| { diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 307ba908e23b6..6311b0234866f 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-executor-wasmi" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 7a877bd9578f5..741898b8282c2 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-executor-wasmtime" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index 9736b25ccac55..6280add275628 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-finality-grandpa-warp-sync" version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 2d45fa100f79d..fd0b52cd1f70b 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-finality-grandpa" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index d2976ee71275f..ad39ad6ba95b3 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] description = "RPC extensions for the GRANDPA finality gadget" repository = "https://github.com/paritytech/substrate/" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" readme = "README.md" diff --git a/client/finality-grandpa/rpc/src/report.rs b/client/finality-grandpa/rpc/src/report.rs index fef8f22659953..fc97c176f34f0 100644 --- a/client/finality-grandpa/rpc/src/report.rs +++ b/client/finality-grandpa/rpc/src/report.rs @@ -87,8 +87,6 @@ impl RoundState { round_state: &report::RoundState, voters: &HashSet, ) -> Result { - use std::convert::TryInto; - let prevotes = &round_state.prevote_ids; let missing_prevotes = voters.difference(&prevotes).cloned().collect(); @@ -130,8 +128,6 @@ impl ReportedRoundStates { AuthoritySet: ReportAuthoritySet, VoterState: ReportVoterState, { - use std::convert::TryFrom; - let voter_state = voter_state.get().ok_or(Error::EndpointNotReady)?; let (set_id, 
current_voters) = authority_set.get(); diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index c28dd4e011945..7d92e14e5d471 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-informant" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Substrate informant." -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 74fd85c184c6f..bd158091e747c 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-keystore" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index cc567c60524a1..1b83bf5be42f4 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-light" version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-light" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index b4907ade834aa..1f0b5c313294d 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-network-gossip" version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-network-gossip" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 7b4b30c593855..7c2905d5fe03a 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-network" version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-network" diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 7f85c2b637826..37646875e3b16 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -2172,7 +2172,6 @@ impl ChainSync { /// Return some key metrics. pub(crate) fn metrics(&self) -> Metrics { - use std::convert::TryInto; Metrics { queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index 226762b9658d2..c00837169598b 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -255,7 +255,6 @@ impl ExtraRequests { /// Get some key metrics. 
pub(crate) fn metrics(&self) -> Metrics { - use std::convert::TryInto; Metrics { pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX), active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 13555952cffd5..539c57fe4cb91 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-network-test" version = "0.8.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" publish = false homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 104a0e61f3180..ee697b32e1e4b 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-offchain" version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index a7e9130cfff1c..01b75f1094ff4 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -5,7 +5,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" name = "sc-peerset" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-peerset" readme = "README.md" diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index 296329a5fda77..93c4cce93ca65 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-proposer-metrics" version = "0.9.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 192cf02e7ce17..532a5cf6294ce 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-rpc-api" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index cbbea00d6f576..b3f408cc59806 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-rpc-server" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index f5c7f99ff7435..d8aecfe9fd354 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-rpc" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git 
a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index a06c3a094b40f..9428ac3248f32 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -83,8 +83,6 @@ where match number { None => Ok(Some(self.client().info().best_hash)), Some(num_or_hex) => { - use std::convert::TryInto; - // FIXME <2329>: Database seems to limit the block number to u32 for no reason let block_num: u32 = num_or_hex.try_into().map_err(|_| { Error::Other(format!( diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index bb8c9ecf6a0cb..5c22c1e4fca1c 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-service" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index aeee4a5f90728..49eca272ac75c 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-service-test" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" publish = false homepage = "https://substrate.io" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 136fe7a199f0c..abd378c6fff38 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -2,7 +2,7 @@ name = "sc-state-db" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 4166929ff0317..a9503b992ab37 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-sync-state-rpc" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "A RPC handler to create sync states for light clients." -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 502c7fc20781f..5a25aca29b60b 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-telemetry" version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Telemetry utils" -edition = "2018" +edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 8093420dc5b95..8640208ad3427 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -3,7 +3,7 @@ name = "sc-tracing" version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation implementation for substrate." 
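[The repeated deletions of `use std::convert::TryFrom;`/`use std::convert::TryInto;` above are possible because the Rust 2021 prelude re-exports `TryFrom`, `TryInto`, and `FromIterator`, so once a crate declares `edition = "2021"` those imports are redundant. A minimal sketch of the pattern the `metrics()` hunks rely on; the `saturated_len` helper is illustrative, not from the patch:

// No `use std::convert::TryInto;` needed: the 2021 prelude provides it.
fn saturated_len(items: &[u8]) -> u32 {
    // Same shape as the `metrics()` code above: saturate on overflow.
    items.len().try_into().unwrap_or(u32::MAX)
}

fn main() {
    assert_eq!(saturated_len(&[1, 2, 3]), 3);
}
]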
diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml
index 5cc2d836dcada..9c8579ee66a45 100644
--- a/client/tracing/proc-macro/Cargo.toml
+++ b/client/tracing/proc-macro/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-tracing-proc-macro"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml
index 12642559a3b8d..6d05125002f2e 100644
--- a/client/transaction-pool/Cargo.toml
+++ b/client/transaction-pool/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-transaction-pool"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml
index 7dd1a6724ce59..176624611fbf5 100644
--- a/client/transaction-pool/api/Cargo.toml
+++ b/client/transaction-pool/api/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-transaction-pool-api"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml
index 546232cf60708..6d04fd4e9acdf 100644
--- a/client/utils/Cargo.toml
+++ b/client/utils/Cargo.toml
@@ -2,7 +2,7 @@
 name = "sc-utils"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml
index 65162430a2ac1..08cadb527750b 100644
--- a/frame/assets/Cargo.toml
+++ b/frame/assets/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-assets"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml
index 60e8fa613f06b..303116161a2dc 100644
--- a/frame/atomic-swap/Cargo.toml
+++ b/frame/atomic-swap/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-atomic-swap"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml
index 1761f78edeca2..cb5903048ff8b 100644
--- a/frame/aura/Cargo.toml
+++ b/frame/aura/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-aura"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml
index 7e64509ce6b45..06a4e92270b2b 100644
--- a/frame/authority-discovery/Cargo.toml
+++ b/frame/authority-discovery/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-authority-discovery"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml
index 6ac91970712d7..a24b578fc250e 100644
--- a/frame/authorship/Cargo.toml
+++ b/frame/authorship/Cargo.toml
@@ -3,7 +3,7 @@ name = "pallet-authorship"
 version = "4.0.0-dev"
 description = "Block and Uncle Author tracking for the FRAME"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml
index 6b0dc71b5e29a..9ae942486d627 100644
--- a/frame/babe/Cargo.toml
+++ b/frame/babe/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-babe"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml
index 372dc87e212e2..fa47b9bad5692 100644
--- a/frame/bags-list/Cargo.toml
+++ b/frame/bags-list/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-bags-list"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/bags-list/fuzzer/Cargo.toml b/frame/bags-list/fuzzer/Cargo.toml
index 171e0e7af70cd..510000f631adc 100644
--- a/frame/bags-list/fuzzer/Cargo.toml
+++ b/frame/bags-list/fuzzer/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-bags-list-fuzzer"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.dev"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml
index ee5b8c7c3f6e7..37f351f0d27ef 100644
--- a/frame/bags-list/remote-tests/Cargo.toml
+++ b/frame/bags-list/remote-tests/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-bags-list-remote-tests"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml
index f6e6e97850a72..4ea2e9cbe8a31 100644
--- a/frame/balances/Cargo.toml
+++ b/frame/balances/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-balances"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml
index 3d4a9a72ddf86..0a72ee193b3a7 100644
--- a/frame/beefy-mmr/Cargo.toml
+++ b/frame/beefy-mmr/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-beefy-mmr"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 description = "BEEFY + MMR runtime utilities"
 
diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml
index d5dcc0eed3350..3669ba4c2286c 100644
--- a/frame/beefy-mmr/primitives/Cargo.toml
+++ b/frame/beefy-mmr/primitives/Cargo.toml
@@ -2,7 +2,7 @@
 name = "beefy-merkle-tree"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 description = "A no-std/Substrate compatible library to construct binary merkle tree."
 
diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml
index e5af666e7ca54..a8b516aac66ca 100644
--- a/frame/beefy/Cargo.toml
+++ b/frame/beefy/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-beefy"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 
 [dependencies]
diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml
index dffada428248a..fdc386978dee5 100644
--- a/frame/benchmarking/Cargo.toml
+++ b/frame/benchmarking/Cargo.toml
@@ -2,7 +2,7 @@
 name = "frame-benchmarking"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml
index d949d0fb1d58e..cce6ed69a25ba 100644
--- a/frame/bounties/Cargo.toml
+++ b/frame/bounties/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-bounties"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml
index 3e3d167522e81..6d4567a7851e2 100644
--- a/frame/collective/Cargo.toml
+++ b/frame/collective/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-collective"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml
index 5967600bf68f5..dec33768a0426 100644
--- a/frame/contracts/Cargo.toml
+++ b/frame/contracts/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-contracts"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml
index 48baf23d3aee8..c30efc6869403 100644
--- a/frame/contracts/common/Cargo.toml
+++ b/frame/contracts/common/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-contracts-primitives"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml
index 63d05bcc2f05e..db3c620397771 100644
--- a/frame/contracts/proc-macro/Cargo.toml
+++ b/frame/contracts/proc-macro/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-contracts-proc-macro"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml
index eec02b03aaa0e..82ce4bedb4985 100644
--- a/frame/contracts/rpc/Cargo.toml
+++ b/frame/contracts/rpc/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-contracts-rpc"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml
index e41aa5aaec9b5..b65fbf9aba0c7 100644
--- a/frame/contracts/rpc/runtime-api/Cargo.toml
+++ b/frame/contracts/rpc/runtime-api/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-contracts-rpc-runtime-api"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml
index f0bf2109be065..1973677531e9a 100644
--- a/frame/democracy/Cargo.toml
+++ b/frame/democracy/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-democracy"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml
index 3c6b405c331f0..8a0f80da10829 100644
--- a/frame/election-provider-multi-phase/Cargo.toml
+++ b/frame/election-provider-multi-phase/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-election-provider-multi-phase"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml
index 46e6500cac33a..e22338f456c44 100644
--- a/frame/election-provider-support/Cargo.toml
+++ b/frame/election-provider-support/Cargo.toml
@@ -2,7 +2,7 @@
 name = "frame-election-provider-support"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml
index 80afc3f5d8b00..6c87be7b6d589 100644
--- a/frame/elections-phragmen/Cargo.toml
+++ b/frame/elections-phragmen/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-elections-phragmen"
 version = "5.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml
index f5c7acb1cd5d5..2ca2e584d2f43 100644
--- a/frame/elections/Cargo.toml
+++ b/frame/elections/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-elections"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml
index 2759664d6e653..ffcadca26e11e 100644
--- a/frame/example-offchain-worker/Cargo.toml
+++ b/frame/example-offchain-worker/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-example-offchain-worker"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Unlicense"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/example-parallel/Cargo.toml b/frame/example-parallel/Cargo.toml
index 6511a1cd369bf..169db35e65f1c 100644
--- a/frame/example-parallel/Cargo.toml
+++ b/frame/example-parallel/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-example-parallel"
 version = "3.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Unlicense"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/example-parallel/src/lib.rs b/frame/example-parallel/src/lib.rs
index c86cac4295684..9d191525f631e 100644
--- a/frame/example-parallel/src/lib.rs
+++ b/frame/example-parallel/src/lib.rs
@@ -105,7 +105,6 @@ impl EnlistedParticipant {
     fn verify(&self, event_id: &[u8]) -> bool {
         use sp_core::Public;
         use sp_runtime::traits::Verify;
-        use std::convert::TryFrom;
 
         match sp_core::sr25519::Signature::try_from(&self.signature[..]) {
             Ok(signature) => {
diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml
index e144f1e927d36..0b3a742de0dd7 100644
--- a/frame/example/Cargo.toml
+++ b/frame/example/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-example"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Unlicense"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml
index 44b4dbcf2bd30..52d2f41cb1e34 100644
--- a/frame/executive/Cargo.toml
+++ b/frame/executive/Cargo.toml
@@ -2,7 +2,7 @@
 name = "frame-executive"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml
index c7dc384662f97..6b2eae1156a89 100644
--- a/frame/gilt/Cargo.toml
+++ b/frame/gilt/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-gilt"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml
index 36cc43fc3443e..af125d64ea218 100644
--- a/frame/grandpa/Cargo.toml
+++ b/frame/grandpa/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-grandpa"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml
index e6d1fa3e9dfbc..9a370674e5876 100644
--- a/frame/identity/Cargo.toml
+++ b/frame/identity/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-identity"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml
index ef2fe54a8ceef..f4cf5a9077c9b 100644
--- a/frame/im-online/Cargo.toml
+++ b/frame/im-online/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-im-online"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml
index bf5a82fbb1da7..f1913d4138be0 100644
--- a/frame/indices/Cargo.toml
+++ b/frame/indices/Cargo.toml
@@ -2,7 +2,7 @@
 name = "pallet-indices"
 version = "4.0.0-dev"
 authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2018"
+edition = "2021"
 license = "Apache-2.0"
 homepage = "https://substrate.io"
 repository = "https://github.com/paritytech/substrate/"
"https://github.com/paritytech/substrate/" diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 854b6f52470d7..d4ee5b8008f19 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-lottery" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 9fdfaa4731729..a3747ca0576f5 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-membership" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 942067ebde3ed..ca09725769ab2 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-mmr" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/merkle-mountain-range/primitives/Cargo.toml b/frame/merkle-mountain-range/primitives/Cargo.toml index bbf55a2b7089e..0531a295fafe3 100644 --- a/frame/merkle-mountain-range/primitives/Cargo.toml +++ b/frame/merkle-mountain-range/primitives/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-mmr-primitives" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 926cfd602f673..881cbb3c8ebfe 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-mmr-rpc" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index d1bd23dcab581..2b0d8f5cee792 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-multisig" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index dacec5567ede4..039fd7f97c851 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-nicks" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 960f7e04688dd..450da1325f67e 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-node-authorization" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = 
"Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 97f4644a83ca0..15939027d3be1 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-offences" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index f8459087cb7fb..cf628b6967ce0 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-offences-benchmarking" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 4f4cf2bf9d56d..030209b83cc6c 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-proxy" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index ba77312699172..38fe2f73bc71b 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-randomness-collective-flip" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 092940f5173f8..12198c135e536 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-recovery" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 862321dfa6f26..fab2bc2c635ca 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-scheduler" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Unlicense" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index a7d75ccacb96e..d60e55ae98963 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-scored-pool" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 32d298d3917c3..90de15632a0ea 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-session" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = 
"https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 0d0868d439215..31a028679cfb9 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-session-benchmarking" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index ab2c379c51b5c..8a817f7e0060b 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-society" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 4b608bd91dc76..d9461ab454f39 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-staking" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index e44188bf7894e..15ff3a8727305 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-staking-reward-curve" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/staking/reward-fn/Cargo.toml b/frame/staking/reward-fn/Cargo.toml index ae0b7f50c994c..4e3be2a1bc719 100644 --- a/frame/staking/reward-fn/Cargo.toml +++ b/frame/staking/reward-fn/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-staking-reward-fn" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index ec34efe397f5a..7ca1cb1a4a61b 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -926,7 +926,6 @@ impl ElectionDataProvider> for Pallet #[cfg(feature = "runtime-benchmarks")] fn add_voter(voter: T::AccountId, weight: VoteWeight, targets: Vec) { - use sp_std::convert::TryFrom; let stake = >::try_from(weight).unwrap_or_else(|_| { panic!("cannot convert a VoteWeight into BalanceOf, benchmark needs reconfiguring.") }); @@ -981,7 +980,6 @@ impl ElectionDataProvider> for Pallet targets: Vec, target_stake: Option, ) { - use sp_std::convert::TryFrom; targets.into_iter().for_each(|v| { let stake: BalanceOf = target_stake .and_then(|w| >::try_from(w).ok()) @@ -1256,7 +1254,6 @@ impl VoteWeightProvider for Pallet { fn set_vote_weight_of(who: &T::AccountId, weight: VoteWeight) { // this will clearly results in an inconsistent state, but it should not matter for a // benchmark. 
- use sp_std::convert::TryInto; let active: BalanceOf = weight.try_into().map_err(|_| ()).unwrap(); let mut ledger = Self::ledger(who).unwrap_or_default(); ledger.active = active; diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 3587a234566ec..969dc11dbb6c5 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-sudo" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index b62ae3384fe42..42981dc160a4b 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index ed152c25fc3b7..17128388874a8 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-procedural" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/support/procedural/src/pallet/expand/constants.rs b/frame/support/procedural/src/pallet/expand/constants.rs index 20106c71cbf07..0fe8e467c10da 100644 --- a/frame/support/procedural/src/pallet/expand/constants.rs +++ b/frame/support/procedural/src/pallet/expand/constants.rs @@ -52,7 +52,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { type_: const_.type_.clone(), doc: const_.doc.clone(), default_byte_impl: quote::quote!( - let value = <::#ident as + let value = <::#ident as #frame_support::traits::Get<#const_type>>::get(); #frame_support::codec::Encode::encode(&value) ), diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index 625c2d98baac5..69d6b461206c9 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -134,12 +134,12 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { #fn_vis fn deposit_event(event: Event<#event_use_gen>) { let event = < - ::Event as + ::Event as From> >::from(event); let event = < - ::Event as + ::Event as Into<::Event> >::into(event); diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index d2d1afb017736..daff8848364ee 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -75,12 +75,12 @@ fn decl_genesis_config_and_impl_default( #[serde(deny_unknown_fields)] #[serde(crate = #serde_crate)] #serde_bug_bound - pub struct GenesisConfig#genesis_struct_decl #genesis_where_clause { + pub struct GenesisConfig #genesis_struct_decl #genesis_where_clause { #( #config_fields )* } #[cfg(feature = "std")] - impl#genesis_impl Default for GenesisConfig#genesis_struct #genesis_where_clause { + impl #genesis_impl Default for GenesisConfig #genesis_struct #genesis_where_clause { fn default() -> Self { GenesisConfig { #( 
#config_field_defaults )* @@ -137,7 +137,7 @@ fn impl_build_storage( quote! { #[cfg(feature = "std")] - impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { + impl #genesis_impl GenesisConfig #genesis_struct #genesis_where_clause { /// Build the storage for this module. pub fn build_storage #fn_generic (&self) -> std::result::Result< #scrate::sp_runtime::Storage, @@ -161,7 +161,7 @@ fn impl_build_storage( } #[cfg(feature = "std")] - impl#build_storage_impl #build_storage_impl_trait for GenesisConfig#genesis_struct + impl #build_storage_impl #build_storage_impl_trait for GenesisConfig #genesis_struct #where_clause { fn build_module_genesis_storage( diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs index 988e6fa096243..d877969232f84 100644 --- a/frame/support/procedural/src/storage/getters.rs +++ b/frame/support/procedural/src/storage/getters.rs @@ -92,7 +92,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { let where_clause = &def.where_clause; quote!( - impl#module_impl #module_struct #where_clause { + impl #module_impl #module_struct #where_clause { #getters } ) diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index a90e5051c5b2e..c49a0dafdf5d0 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -207,7 +207,7 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { quote!( #default_byte_getter_struct_defs - impl#module_impl #module_struct #where_clause { + impl #module_impl #module_struct #where_clause { #[doc(hidden)] pub fn storage_metadata() -> #scrate::metadata::PalletStorageMetadata { #store_metadata diff --git a/frame/support/procedural/src/storage/storage_info.rs b/frame/support/procedural/src/storage/storage_info.rs index 844896409f851..4b1d3347b4a43 100644 --- a/frame/support/procedural/src/storage/storage_info.rs +++ b/frame/support/procedural/src/storage/storage_info.rs @@ -48,7 +48,7 @@ pub fn impl_storage_info(def: &DeclStorageDefExt) -> TokenStream { let where_clause = &def.where_clause; quote!( - impl#module_impl #scrate::traits::StorageInfoTrait for #module_struct #where_clause { + impl #module_impl #scrate::traits::StorageInfoTrait for #module_struct #where_clause { fn storage_info() -> #scrate::sp_std::vec::Vec<#scrate::traits::StorageInfo> { let mut res = #scrate::sp_std::vec![]; #res_append_storage diff --git a/frame/support/procedural/src/storage/store_trait.rs b/frame/support/procedural/src/storage/store_trait.rs index 7dde92cf9a75d..5794c72d22c8d 100644 --- a/frame/support/procedural/src/storage/store_trait.rs +++ b/frame/support/procedural/src/storage/store_trait.rs @@ -48,7 +48,7 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { #visibility trait #store_trait { #decl_store_items } - impl#module_impl #store_trait for #module_struct #where_clause { + impl #module_impl #store_trait for #module_struct #where_clause { #impl_store_items } ) diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 6a8fb57b39bda..1207d06e6d591 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-procedural-tools" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index 9f4e2d9dca2a5..f0c27b5397d5d 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-procedural-tools-derive" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/support/procedural/tools/derive/src/lib.rs b/frame/support/procedural/tools/derive/src/lib.rs index 7922105895608..9ce88e7c47937 100644 --- a/frame/support/procedural/tools/derive/src/lib.rs +++ b/frame/support/procedural/tools/derive/src/lib.rs @@ -131,7 +131,7 @@ fn derive_totokens_enum(input: syn::ItemEnum) -> TokenStream { }; let field = fields_idents(v.fields.iter().map(Clone::clone)); quote! { - #ident::#v_ident#fields_build => { + #ident::#v_ident #fields_build => { #( #field.to_tokens(tokens); )* diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs index a59869c2fc9a3..06a4c759c46db 100644 --- a/frame/support/src/traits/members.rs +++ b/frame/support/src/traits/members.rs @@ -70,7 +70,7 @@ impl Contains for Tuple { /// to `matches!`. #[macro_export] macro_rules! match_type { - ( pub type $n:ident: impl Contains<$t:ty> = { $phead:pat $( | $ptail:pat )* } ; ) => { + ( pub type $n:ident: impl Contains<$t:ty> = { $phead:pat_param $( | $ptail:pat )* } ; ) => { pub struct $n; impl $crate::traits::Contains<$t> for $n { fn contains(l: &$t) -> bool { diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 77fd4f5620969..762c85f75c363 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-test" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" publish = false homepage = "https://substrate.io" @@ -23,7 +23,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../.. 
sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } -trybuild = "1.0.43" +trybuild = "1.0.52" pretty_assertions = "1.0.0" rustversion = "1.0.0" frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } @@ -36,13 +36,15 @@ std = [ "serde/std", "codec/std", "scale-info/std", - "sp-io/std", "frame-support/std", "frame-system/std", "sp-core/std", "sp-std/std", + "sp-io/std", "sp-runtime/std", "sp-state-machine", + "sp-arithmetic/std", + "sp-version/std", ] try-runtime = ["frame-support/try-runtime"] # WARNING: CI only execute pallet test with this feature, diff --git a/frame/support/test/compile_pass/Cargo.toml b/frame/support/test/compile_pass/Cargo.toml index bca833200d444..b8a64f4e7022a 100644 --- a/frame/support/test/compile_pass/Cargo.toml +++ b/frame/support/test/compile_pass/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-test-compile-pass" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" publish = false homepage = "https://substrate.dev" diff --git a/frame/support/test/pallet/Cargo.toml b/frame/support/test/pallet/Cargo.toml index a3d101967ae65..c26fdda4e9624 100644 --- a/frame/support/test/pallet/Cargo.toml +++ b/frame/support/test/pallet/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-support-test-pallet" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" publish = false homepage = "https://substrate.io" diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index dd171c5b12ec2..dcf739af614b8 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -166,25 +166,25 @@ pub mod pallet { T::AccountId: From + From + SomeAssociation1, { fn on_initialize(_: BlockNumberFor) -> Weight { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType2); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(10)); 10 } fn on_finalize(_: BlockNumberFor) { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType2); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(20)); } fn on_runtime_upgrade() -> Weight { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType2); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(30)); 30 } fn integrity_test() { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType2); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType2); // Test for where clause } } @@ -200,8 +200,8 @@ pub mod pallet { #[pallet::compact] _foo: u32, _bar: u32, ) -> DispatchResultWithPostInfo { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType3); // Test for where clause + let _ = 
T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType3); // Test for where clause let _ = origin; Self::deposit_event(Event::Something(3)); Ok(().into()) @@ -268,7 +268,7 @@ pub mod pallet { where T::AccountId: From + From + SomeAssociation1, { - T::AccountId::from(SomeType7); // Test where clause works + let _ = T::AccountId::from(SomeType7); // Test where clause works 4u16 } @@ -352,8 +352,8 @@ pub mod pallet { T::AccountId: From + SomeAssociation1 + From, { fn build(&self) { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType4); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType4); // Test for where clause } } @@ -370,8 +370,8 @@ pub mod pallet { { type Call = Call; fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType5); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType5); // Test for where clause if matches!(call, Call::foo_transactional { .. }) { return Ok(ValidTransaction::default()) } @@ -390,8 +390,8 @@ pub mod pallet { const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(_data: &InherentData) -> Option { - T::AccountId::from(SomeType1); // Test for where clause - T::AccountId::from(SomeType6); // Test for where clause + let _ = T::AccountId::from(SomeType1); // Test for where clause + let _ = T::AccountId::from(SomeType6); // Test for where clause Some(Call::foo_no_post_info {}) } diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index dc69bd2d5e85f..c5d2144e28dd0 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-system" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 6aa2251f287d9..7e7c2f6e69f20 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-system-benchmarking" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index b5e569e1298ca..67e627f7dcf98 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index cd636ec6c23b5..2f07b2a0975a5 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-timestamp" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/tips/Cargo.toml 
b/frame/tips/Cargo.toml index a3f268169b784..805f1663d1ae3 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-tips" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index bea263cbef6f1..1d3066e39fbda 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-transaction-payment" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 1a2c68227c11f..8ce2e4991568c 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 7bdca2f658293..315f140fc4f23 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index bcd3fd145f575..7167ae7424571 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-transaction-storage" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Unlicense" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index bcbe41985c655..d375e22df949b 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-treasury" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index 611a844278269..c947717953192 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-try-runtime" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index f240bb98afab7..4b6d0485567c2 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-uniques" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = 
"https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 0a0a9eafd845b..c55b20df27855 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-utility" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 3179607b3f6d7..035124f7d0de3 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-vesting" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index c57c3730fc7b6..65da54b87753d 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-api" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index d9dd0bf9020c7..7b0b55c8c3b53 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-api-proc-macro" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 510a2eeaa530a..34907d6197285 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -786,7 +786,7 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { quote!( #[cfg(any(feature = "std", test))] impl < #( #impl_generics, )* > #crate_::RuntimeApiInfo - for #trait_name < #( #ty_generics, )* > + for dyn #trait_name < #( #ty_generics, )* > { #id #version diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 82954d193e605..cb74f95d21b09 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -644,8 +644,6 @@ pub const fn serialize_runtime_api_info(id: [u8; 8], version: u32) -> [u8; RUNTI /// Deserialize the runtime API info serialized by [`serialize_runtime_api_info`]. 
pub fn deserialize_runtime_api_info(bytes: [u8; RUNTIME_API_INFO_SIZE]) -> ([u8; 8], u32) { - use sp_std::convert::TryInto; - let id: [u8; 8] = bytes[0..8] .try_into() .expect("the source slice size is equal to the dest array length; qed"); diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index faee5ebdc77db..8c274b386470a 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-api-test" version = "2.0.1" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" publish = false homepage = "https://substrate.io" @@ -21,7 +21,7 @@ sp-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } codec = { package = "parity-scale-codec", version = "2.0.0" } sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } -trybuild = "1.0.43" +trybuild = "1.0.52" rustversion = "1.0.0" [dev-dependencies] diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 88411d86392af..e715d2ed3d31c 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-application-crypto" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" description = "Provides facilities for generating application specific crypto wrapper types." license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index d10f011c4c603..f59f5d9a249cb 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-application-crypto-test" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" description = "Integration tests for application-crypto" license = "Apache-2.0" publish = false diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 92b16b895e3b5..e6fa1759774ed 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-arithmetic" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 5f2d5801ff995..d6b2088523773 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-arithmetic-fuzzer" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index d4c75dda352ca..22f3884b55021 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Authority discovery primitives" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/authorship/Cargo.toml 
b/primitives/authorship/Cargo.toml index cee82ca77c375..a31a349611163 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-authorship" version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Authorship primitives" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index 633ac0e8fbcd1..83472f54c5135 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -2,7 +2,7 @@ name = "beefy-primitives" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" [dependencies] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 8499bdf8e1c70..6c6579c353298 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-block-builder" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 93daef5fa1a27..7af7807954ef1 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-blockchain" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index ca73cf206de02..361095c6218ff 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-aura" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 1690b7c9a02d3..d942f72909453 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-babe" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 9a5488abba653..735b1e8eb095d 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-consensus" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 406ed3dea46a5..07863850a8f87 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-pow" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" 
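The `$phead:pat_param` change to `match_type!` earlier in this patch is another edition-2021 requirement: a `$x:pat` fragment now matches top-level or-patterns, so it may no longer be followed by a literal `|` in a matcher. `pat_param` keeps the pre-2021 "single pattern" meaning. A self-contained sketch of the same shape, using a hypothetical macro name:

```rust
// `pat_param` matches a pattern without a top-level `|`, which is what
// lets the matcher itself splice alternatives together with `|`.
macro_rules! any_of {
    ( $value:expr, $head:pat_param $( | $tail:pat_param )* ) => {
        matches!($value, $head $( | $tail )*)
    };
}

fn main() {
    assert!(any_of!(3u8, 1 | 3 | 5));
    assert!(!any_of!(4u8, 1 | 3 | 5));
}
```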
-edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index 014ee9b93e6e8..ad83835e02706 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for slots-based consensus" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index c103e68eb66b2..8a99dc9432a0b 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Primitives for VRF based consensus" -edition = "2018" +edition = "2021" license = "Apache-2.0" repository = "https://github.com/paritytech/substrate/" homepage = "https://substrate.io" diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 148f2343ee2b7..63ca358e68101 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-core" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml index 43c670b59b0a7..eeee40405344c 100644 --- a/primitives/core/hashing/Cargo.toml +++ b/primitives/core/hashing/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-core-hashing" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/core/hashing/proc-macro/Cargo.toml b/primitives/core/hashing/proc-macro/Cargo.toml index 6d83b50b8a296..452165b31cb90 100644 --- a/primitives/core/hashing/proc-macro/Cargo.toml +++ b/primitives/core/hashing/proc-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-core-hashing-proc-macro" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index c3d2d8ce99df9..94e16f3cfb3d5 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-database" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 954d0f89663fe..d1f1f81a9f143 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-debug-derive" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 
5a1b6b5e73734..96199c5b4d222 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-externalities" version = "0.10.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate externalities abstraction" diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 5cf7ac6711a70..4b9fde85147ea 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-finality-grandpa" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index d99a4c1882222..b762a1596bf82 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -389,7 +389,6 @@ where { use sp_application_crypto::AppKey; use sp_core::crypto::Public; - use sp_std::convert::TryInto; let encoded = localized_payload(round, set_id, &message); let signature = SyncCryptoStore::sign_with( diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 04b662fb059f0..d52140d94ed31 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-inherents" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index df9a496a914be..2d7c50bdc25e4 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-io" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 464abdb6cb1aa..3b5d916b47b64 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-keyring" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 3a0532f1db313..e16ff4676c3b1 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-keystore" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml index cd124580ef19c..95ba9b3324127 100644 --- a/primitives/maybe-compressed-blob/Cargo.toml +++ b/primitives/maybe-compressed-blob/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-maybe-compressed-blob" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index 5ffaf76379bda..95f303a382526 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index 1d13d33a35e80..f9fce9d8744da 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/npos-elections/solution-type/Cargo.toml b/primitives/npos-elections/solution-type/Cargo.toml index 27d5d0bb1231c..f50150bdbd58b 100644 --- a/primitives/npos-elections/solution-type/Cargo.toml +++ b/primitives/npos-elections/solution-type/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-npos-elections-solution-type" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" @@ -26,4 +26,4 @@ scale-info = "1.0" sp-arithmetic = { path = "../../arithmetic", version = "4.0.0-dev" } # used by generate_solution_type: sp-npos-elections = { path = "..", version = "4.0.0-dev" } -trybuild = "1.0.43" +trybuild = "1.0.52" diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index c1e891acba955..c4da2b4f88921 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -4,7 +4,7 @@ name = "sp-offchain" version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index 890cc277bd849..0845e175c2377 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-panic-handler" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index af883e2199415..15f7aa2b3b896 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-rpc" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index fc8923cdb80a3..c640f02824f24 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime-interface" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" @@ -31,7 +31,7 @@ sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } sp-core = { version = "4.0.0-dev", path = "../core" } sp-io = { version = "4.0.0-dev", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.43" +trybuild = "1.0.52" [features] default = [ "std" ] diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index dd08d03313396..958a95ceb3fcf 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime-interface-proc-macro" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index a3c82de473abd..60ece9d78795f 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime-interface-test-wasm-deprecated" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" build = "build.rs" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 557b5b9bee89f..d8dcc8a470530 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime-interface-test-wasm" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" build = "build.rs" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 26884d5cb729f..4c88438f08c66 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime-interface-test" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" publish = false homepage = "https://substrate.io" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 475d2b769de39..9d2957e7aed1a 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-runtime" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 80cd195c6f406..b1ee431042e7a 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-sandbox" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index cc438e9a1c148..5b2d499279a47 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-serializer" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = 
"2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 1e9ed6ec9b651..01b299091ed1c 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-session" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 8ea24760e2b8b..3be8f4aba1428 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-staking" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index bbe9728befd80..343151a50a927 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-state-machine" version = "0.10.0-dev" authors = ["Parity Technologies "] description = "Substrate State Machine" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index e4cacf60cc36f..34f27ae7256d1 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-std" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c7d23fcf70103..2413c45a7312e 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-storage" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" description = "Storage related primitives" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml index f91d15d2d40b3..f472ec88ea189 100644 --- a/primitives/tasks/Cargo.toml +++ b/primitives/tasks/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-tasks" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 75ce8b752d3ca..4d184c7d02e7f 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-test-primitives" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 32f4c53083435..552a3cb5e8d63 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-timestamp" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" 
homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 46930a674f2c9..f4bfb1f15dad3 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-tracing" version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation primitives and macros for Substrate." diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index f74fcb44fee2d..e4a407547971a 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-transaction-pool" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index 536e2f201a104..c967497f4ff96 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-transaction-storage-proof" version = "4.0.0-dev" authors = ["Parity Technologies "] description = "Transaction storage proof primitives" -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 66d8a1e47276e..6190df210403c 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" license = "Apache-2.0" -edition = "2018" +edition = "2021" homepage = "https://substrate.io" documentation = "https://docs.rs/sp-trie" readme = "README.md" diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 2a2c2698c74c3..af44aed6c5b21 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-version" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml index 587ca06bdc179..bf6e0bdec7a6e 100644 --- a/primitives/version/proc-macro/Cargo.toml +++ b/primitives/version/proc-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 73b47e563a5b5..339c4cf8bc8bd 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -2,7 +2,7 @@ name = "sp-wasm-interface" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git 
a/test-utils/Cargo.toml b/test-utils/Cargo.toml index a9ffefa05df7f..6cb91bb589c6d 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-utils" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" @@ -18,4 +18,4 @@ tokio = { version = "1.10", features = ["macros", "time"] } [dev-dependencies] sc-service = { version = "0.10.0-dev", path = "../client/service" } -trybuild = { version = "1.0.43", features = [ "diff" ] } +trybuild = { version = "1.0.52", features = [ "diff" ] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 204b6ac435e07..a8b2e8f57ac52 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-client" version = "2.0.1" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 2a1f52346840f..f195c0e419eac 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-utils-derive" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index eb6ca51ce2e5a..f02e079046336 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-runtime" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" build = "build.rs" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 75ebb8f23326c..fbc6aefdb850c 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-runtime-client" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 5a2983b058b04..402caa93d10d8 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 9c9672fe8f5d8..071a82f3c769f 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-test-utils-test-crate" version = "0.1.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml index b5b115771b539..9299076bb1f68 100644 --- 
a/test-utils/test-runner/Cargo.toml +++ b/test-utils/test-runner/Cargo.toml @@ -2,7 +2,7 @@ name = "test-runner" version = "0.9.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" publish = false [dependencies] diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 786e6f9002914..93611c7b5b017 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-build-script-utils" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 81fa1747a84d7..cdfce5592f2fa 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -2,7 +2,7 @@ name = "fork-tree" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index ccca30849f919..605a14e3adff7 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -2,7 +2,7 @@ name = "frame-benchmarking-cli" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index fcdbb215f91b2..9d14819337419 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-frame-cli" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index 1bb53207f7d4c..03382878710b7 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -2,7 +2,7 @@ name = "generate-bags" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index 68d3cad16de47..5029e049361c9 100644 --- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -2,7 +2,7 @@ name = "node-runtime-generate-bags" version = "3.0.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index af9df4435bcab..540412ef04c4c 100644 --- a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -71,7 +71,6 @@ fn existential_weight( minimum_balance: u128, ) -> VoteWeight { use frame_support::traits::CurrencyToVote; - use std::convert::TryInto; T::CurrencyToVote::to_vote( minimum_balance diff --git a/utils/frame/remote-externalities/Cargo.toml 
b/utils/frame/remote-externalities/Cargo.toml index 2b35402f8f63f..3e7c229ec4d65 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -2,7 +2,7 @@ name = "remote-externalities" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 2010d1e02f73f..0d21bdd6c0181 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -5,7 +5,7 @@ authors = [ "Parity Technologies ", "Andrew Dirksen ", ] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 85868836f0456..31a46b3902106 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -2,7 +2,7 @@ name = "substrate-frame-rpc-system" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index e922af971044c..154c522c1dfd0 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -2,7 +2,7 @@ name = "try-runtime-cli" version = "0.10.0-dev" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 4d218e233bcbc..78bd68ac9e3a5 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -4,7 +4,7 @@ name = "substrate-prometheus-endpoint" version = "0.9.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2018" +edition = "2021" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 92b1af753ef60..d8802c432f55d 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -3,7 +3,7 @@ name = "substrate-wasm-builder" version = "5.0.0-dev" authors = ["Parity Technologies "] description = "Utility for building WASM binaries" -edition = "2018" +edition = "2021" readme = "README.md" repository = "https://github.com/paritytech/substrate/" license = "Apache-2.0" From 3524f6e692d568fae60c26ec8156f3fb585532ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Nov 2021 19:58:49 +0000 Subject: [PATCH 050/162] Bump strum from 0.21.0 to 0.22.0 (#10045) Bumps [strum](https://github.com/Peternator7/strum) from 0.21.0 to 0.22.0. - [Release notes](https://github.com/Peternator7/strum/releases) - [Changelog](https://github.com/Peternator7/strum/blob/master/CHANGELOG.md) - [Commits](https://github.com/Peternator7/strum/commits) --- updated-dependencies: - dependency-name: strum dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 29 +++++++------------ client/beefy/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 2 +- primitives/keyring/Cargo.toml | 2 +- 4 files changed, 13 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a50fe3565715..ea54adf99e458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -507,7 +507,7 @@ dependencies = [ "sp-core", "sp-keystore", "sp-runtime", - "strum 0.21.0", + "strum", "substrate-prometheus-endpoint", "thiserror", "wasm-timer", @@ -5450,7 +5450,7 @@ dependencies = [ "sp-std", "sp-tracing", "static_assertions", - "strum 0.21.0", + "strum", "strum_macros 0.21.1", ] @@ -9502,7 +9502,7 @@ dependencies = [ "lazy_static", "sp-core", "sp-runtime", - "strum 0.20.0", + "strum", ] [[package]] @@ -9998,27 +9998,18 @@ dependencies = [ [[package]] name = "strum" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" +checksum = "f7ac893c7d471c8a21f31cfe213ec4f6d9afeed25537c772e08ef3f005f8729e" dependencies = [ - "strum_macros 0.20.1", -] - -[[package]] -name = "strum" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" -dependencies = [ - "strum_macros 0.21.1", + "strum_macros 0.22.0", ] [[package]] name = "strum_macros" -version = "0.20.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" +checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" dependencies = [ "heck", "proc-macro2", @@ -10028,9 +10019,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.21.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" +checksum = "339f799d8b549e3744c7ac7feb216383e4005d94bdb22561b3ab8f3b808ae9fb" dependencies = [ "heck", "proc-macro2", diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 37dc4491e21d4..60f9fde030800 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -35,4 +35,4 @@ beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } [dev-dependencies] sc-network-test = { version = "0.8.0", path = "../network/test" } -strum = { version = "0.21", features = ["derive"] } +strum = { version = "0.22", features = ["derive"] } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 8a0f80da10829..63111f89e5d39 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -37,7 +37,7 @@ rand = { version = "0.7.3", default-features = false, optional = true, features "alloc", "small_rng", ] } -strum = { optional = true, version = "0.21.0" } +strum = { optional = true, version = "0.22.0" } strum_macros = { optional = true, version = "0.21.1" } [dev-dependencies] diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 3b5d916b47b64..249e7d766e39e 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -18,4 +18,4 @@ targets = ["x86_64-unknown-linux-gnu"] sp-core = { version = "4.0.0-dev", path = "../core" } sp-runtime = { version = 
"4.0.0-dev", path = "../runtime" } lazy_static = "1.4.0" -strum = { version = "0.20.0", features = ["derive"] } +strum = { version = "0.22.0", features = ["derive"] } From 5ba810c93203487ac58cffc6e0d34cdda18e362c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Nov 2021 11:11:28 +0000 Subject: [PATCH 051/162] Bump wasmi from 0.9.0 to 0.9.1 (#10116) Bumps [wasmi](https://github.com/paritytech/wasmi) from 0.9.0 to 0.9.1. - [Release notes](https://github.com/paritytech/wasmi/releases) - [Commits](https://github.com/paritytech/wasmi/compare/v0.9.0...v0.9.1) --- updated-dependencies: - dependency-name: wasmi dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/executor/Cargo.toml | 2 +- client/executor/common/Cargo.toml | 2 +- client/executor/wasmi/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/sandbox/Cargo.toml | 2 +- primitives/wasm-interface/Cargo.toml | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) mode change 100755 => 100644 primitives/sandbox/Cargo.toml diff --git a/Cargo.lock b/Cargo.lock index ea54adf99e458..2257fa0a5b005 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11498,9 +11498,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ee05bba3d1d994652079893941a2ef9324d2b58a63c31b40678fb7eddd7a5a" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ "downcast-rs", "errno", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 028854992a094..ef66d8072b537 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -21,7 +21,7 @@ sp-tasks = { version = "4.0.0-dev", path = "../../primitives/tasks" } sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } -wasmi = "0.9.0" +wasmi = "0.9.1" lazy_static = "1.4.0" sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-wasm-interface = { version = "4.0.0-dev", path = "../../primitives/wasm-interface" } diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index fc7d5a1528470..7ac747bf967bd 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] derive_more = "0.99.2" pwasm-utils = "0.18.0" codec = { package = "parity-scale-codec", version = "2.0.0" } -wasmi = "0.9.0" +wasmi = "0.9.1" sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" } diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 6311b0234866f..255a470b374da 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -wasmi = "0.9.0" +wasmi = "0.9.1" codec = { package = "parity-scale-codec", version = "2.0.0" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } sc-allocator = { version = "4.0.0-dev", path = 
"../../allocator" } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 63ca358e68101..8262393e653fe 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -27,7 +27,7 @@ primitive-types = { version = "0.10.1", default-features = false, features = [ "scale-info" ] } impl-serde = { version = "0.3.0", optional = true } -wasmi = { version = "0.9.0", optional = true } +wasmi = { version = "0.9.1", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.2.0", optional = true } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml old mode 100755 new mode 100644 index b1ee431042e7a..8e7acff4ff7e8 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [target.'cfg(target_arch = "wasm32")'.dependencies] -wasmi = { version = "0.9.0", default-features = false, features = ["core"] } +wasmi = { version = "0.9.1", default-features = false, features = ["core"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] wasmi = "0.9.0" diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 339c4cf8bc8bd..c378b185e392c 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -wasmi = { version = "0.9.0", optional = true } +wasmi = { version = "0.9.1", optional = true } impl-trait-for-tuples = "0.2.1" sp-std = { version = "4.0.0-dev", path = "../std", default-features = false } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } From 6873fc69e2140f554b75242b7e66a5d6a846e772 Mon Sep 17 00:00:00 2001 From: Florian Franzen Date: Fri, 5 Nov 2021 12:26:49 +0100 Subject: [PATCH 052/162] Remove old Cargo.toml (#10188) --- client/finality-grandpa-warp-sync/Cargo.toml | 35 -------------------- 1 file changed, 35 deletions(-) delete mode 100644 client/finality-grandpa-warp-sync/Cargo.toml diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml deleted file mode 100644 index 6280add275628..0000000000000 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -description = "A request-response protocol for handling grandpa warp sync requests" -name = "sc-finality-grandpa-warp-sync" -version = "0.10.0-dev" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -derive_more = "0.99.11" -futures = "0.3.8" -log = "0.4.11" -prost = "0.8" -sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-finality-grandpa = { version = "0.10.0-dev", path = "../finality-grandpa" } -sc-network = { version = "0.10.0-dev", path = "../network" } -sc-service = { version = "0.10.0-dev", path = "../service" } -sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } -sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } - 
-[dev-dependencies] -finality-grandpa = { version = "0.14.4" } -rand = "0.8" -sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } -sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } -sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } From 8439a0bba4aeb69669510061e6aeeb106da9d651 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 5 Nov 2021 21:52:34 +0900 Subject: [PATCH 053/162] Remove unnecessary allocations when crossing WASM FFI boundary (#10191) --- primitives/runtime-interface/src/impls.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 40f8e90479f95..0b9cdc26f4650 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -318,9 +318,8 @@ macro_rules! impl_traits_for_arrays { type SelfInstance = [u8; $n]; fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<[u8; $n]> { - let data = context.read_memory(Pointer::new(arg), $n)?; let mut res = [0u8; $n]; - res.copy_from_slice(&data); + context.read_memory_into(Pointer::new(arg), &mut res)?; Ok(res) } } @@ -514,10 +513,8 @@ macro_rules! for_u128_i128 { type SelfInstance = $type; fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<$type> { - let data = - context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; let mut res = [0u8; mem::size_of::<$type>()]; - res.copy_from_slice(&data); + context.read_memory_into(Pointer::new(arg), &mut res)?; Ok(<$type>::from_le_bytes(res)) } } From 7b8f8efde025c6ac8daed492d4d96dea3c563334 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Nov 2021 15:51:54 +0100 Subject: [PATCH 054/162] Bump syn from 1.0.80 to 1.0.81 (#10182) Bumps [syn](https://github.com/dtolnay/syn) from 1.0.80 to 1.0.81. - [Release notes](https://github.com/dtolnay/syn/releases) - [Commits](https://github.com/dtolnay/syn/compare/1.0.80...1.0.81) --- updated-dependencies: - dependency-name: syn dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- client/chain-spec/derive/Cargo.toml | 2 +- client/tracing/proc-macro/Cargo.toml | 2 +- frame/staking/reward-curve/Cargo.toml | 2 +- frame/support/procedural/Cargo.toml | 2 +- frame/support/procedural/tools/Cargo.toml | 2 +- frame/support/procedural/tools/derive/Cargo.toml | 2 +- primitives/api/proc-macro/Cargo.toml | 2 +- primitives/core/hashing/proc-macro/Cargo.toml | 2 +- primitives/debug-derive/Cargo.toml | 2 +- primitives/npos-elections/solution-type/Cargo.toml | 2 +- primitives/runtime-interface/proc-macro/Cargo.toml | 2 +- primitives/version/proc-macro/Cargo.toml | 2 +- test-utils/derive/Cargo.toml | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2257fa0a5b005..8d168230b528e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6780,9 +6780,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] @@ -10286,9 +10286,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ "proc-macro2", "quote", diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index 386137d0667ad..25339eea14f12 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -18,6 +18,6 @@ proc-macro = true proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" quote = "1.0.10" -syn = "1.0.80" +syn = "1.0.81" [dev-dependencies] diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml index 9c8579ee66a45..c51c8764e6772 100644 --- a/client/tracing/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" quote = { version = "1.0.10", features = ["proc-macro"] } -syn = { version = "1.0.80", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "1.0.81", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 15ff3a8727305..1250bbcd39056 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.80", features = ["full", "visit"] } +syn = { version = "1.0.81", features = ["full", "visit"] } quote = "1.0.10" proc-macro2 = "1.0.29" proc-macro-crate = "1.0.0" diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 17128388874a8..b47cf7f97956a 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -19,7 +19,7 @@ frame-support-procedural-tools = { version = "4.0.0-dev", path = "./tools" } proc-macro2 = "1.0.29" quote = "1.0.10" 
Inflector = "0.11.4" -syn = { version = "1.0.80", features = ["full"] } +syn = { version = "1.0.81", features = ["full"] } [features] default = ["std"] diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 1207d06e6d591..7b957be184014 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } proc-macro2 = "1.0.29" quote = "1.0.10" -syn = { version = "1.0.80", features = ["full", "visit", "extra-traits"] } +syn = { version = "1.0.81", features = ["full", "visit", "extra-traits"] } proc-macro-crate = "1.0.0" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index f0c27b5397d5d..8327134b3fb40 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.29" quote = { version = "1.0.10", features = ["proc-macro"] } -syn = { version = "1.0.80", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } +syn = { version = "1.0.81", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 7b0b55c8c3b53..a5df7aef322ab 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.10" -syn = { version = "1.0.80", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "1.0.81", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.29" blake2-rfc = { version = "0.2.18", default-features = false } proc-macro-crate = "1.0.0" diff --git a/primitives/core/hashing/proc-macro/Cargo.toml b/primitives/core/hashing/proc-macro/Cargo.toml index 452165b31cb90..89b225e6aaa5f 100644 --- a/primitives/core/hashing/proc-macro/Cargo.toml +++ b/primitives/core/hashing/proc-macro/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.77", features = ["full", "parsing"] } +syn = { version = "1.0.81", features = ["full", "parsing"] } quote = "1.0.6" proc-macro2 = "1.0.29" sp-core-hashing = { version = "4.0.0-dev", path = "../", default-features = false } diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index d1f1f81a9f143..a2f77b7591fe1 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.10" -syn = "1.0.80" +syn = "1.0.81" proc-macro2 = "1.0" [features] diff --git a/primitives/npos-elections/solution-type/Cargo.toml b/primitives/npos-elections/solution-type/Cargo.toml index f50150bdbd58b..5d8d3890577a7 100644 --- a/primitives/npos-elections/solution-type/Cargo.toml +++ b/primitives/npos-elections/solution-type/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.80", features = ["full", "visit"] } +syn = { version = "1.0.81", features = ["full", "visit"] } quote = "1.0" proc-macro2 = "1.0.29" proc-macro-crate = "1.0.0" diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 958a95ceb3fcf..cc5daa695bb0f 100644 --- 
a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.80", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "1.0.81", features = ["full", "visit", "fold", "extra-traits"] } quote = "1.0.10" proc-macro2 = "1.0.29" Inflector = "0.11.4" diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml index bf6e0bdec7a6e..79fb9b18a2381 100644 --- a/primitives/version/proc-macro/Cargo.toml +++ b/primitives/version/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.10" -syn = { version = "1.0.80", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "1.0.81", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.29" codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] } diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index f195c0e419eac..967fc1e87a36e 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -10,7 +10,7 @@ description = "Substrate test utilities macros" [dependencies] quote = "1.0.10" -syn = { version = "1.0.80", features = ["full"] } +syn = { version = "1.0.81", features = ["full"] } proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" From abca10f32a1844d4d05884c92ea2e08b17c6b8c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Nov 2021 05:48:41 +0000 Subject: [PATCH 055/162] Bump strum_macros from 0.21.1 to 0.22.0 (#10200) Bumps [strum_macros](https://github.com/Peternator7/strum) from 0.21.1 to 0.22.0. - [Release notes](https://github.com/Peternator7/strum/releases) - [Changelog](https://github.com/Peternator7/strum/blob/master/CHANGELOG.md) - [Commits](https://github.com/Peternator7/strum/commits) --- updated-dependencies: - dependency-name: strum_macros dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 16 ++-------------- frame/election-provider-multi-phase/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d168230b528e..eeb1012b1d1d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5451,7 +5451,7 @@ dependencies = [ "sp-tracing", "static_assertions", "strum", - "strum_macros 0.21.1", + "strum_macros", ] [[package]] @@ -10002,19 +10002,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7ac893c7d471c8a21f31cfe213ec4f6d9afeed25537c772e08ef3f005f8729e" dependencies = [ - "strum_macros 0.22.0", -] - -[[package]] -name = "strum_macros" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", + "strum_macros", ] [[package]] diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 63111f89e5d39..b37054a7bbddf 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -38,7 +38,7 @@ rand = { version = "0.7.3", default-features = false, optional = true, features "small_rng", ] } strum = { optional = true, version = "0.22.0" } -strum_macros = { optional = true, version = "0.21.1" } +strum_macros = { optional = true, version = "0.22.0" } [dev-dependencies] parking_lot = "0.11.0" From 0397c5bd745574c587ea853ae9472e8dd0a2d9fe Mon Sep 17 00:00:00 2001 From: Alan Sapede Date: Sat, 6 Nov 2021 12:49:53 -0400 Subject: [PATCH 056/162] Adds block production time in logs (#10205) --- client/basic-authorship/src/basic_authorship.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 0055254b67091..573601a9102c5 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -462,8 +462,9 @@ where }); info!( - "🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + "🎁 Prepared block for proposing at {} ({} ms) [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", block.header().number(), + block_timer.elapsed().as_millis(), ::Hash::from(block.header().hash()), block.header().parent_hash(), block.extrinsics().len(), From 702fd839917f7b197770917ae5a64df0fc9593eb Mon Sep 17 00:00:00 2001 From: cheme Date: Sun, 7 Nov 2021 14:13:02 +0100 Subject: [PATCH 057/162] Fast sync child trie support. (#9239) * state machine proofs. * initial implementation * Remove todo. * Extend test and fix import. * fix no proof, with proof ko. * fix start at logic. * Restore response size. * Rework comments. * Add explicit ref * Use compact proof. * ref change * elaborate on empty change set condition. * KeyValueState renaming. * Do not add a child trie with the same root twice to the sync reply. * rust format * Fix merge.
* fix warnings and fmt * fmt * update protocol id to V2 --- client/api/src/backend.rs | 2 +- client/api/src/lib.rs | 2 +- client/api/src/proof_provider.rs | 37 +- client/consensus/common/src/block_import.rs | 2 +- client/network/src/protocol/sync/state.rs | 158 +++++-- client/network/src/schema/api.v1.proto | 20 +- client/network/src/state_request_handler.rs | 49 +- client/network/test/src/lib.rs | 7 + client/network/test/src/sync.rs | 43 +- client/service/src/client/client.rs | 216 +++++++-- primitives/state-machine/src/lib.rs | 448 ++++++++++++++++++- primitives/state-machine/src/trie_backend.rs | 6 +- test-utils/runtime/client/src/lib.rs | 5 + 13 files changed, 855 insertions(+), 140 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 8b5bd50ffa614..9dfe82a57ab3b 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -42,7 +42,7 @@ use std::{ sync::Arc, }; -pub use sp_state_machine::Backend as StateBackend; +pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 16935b1e846cf..f1c78f6603eb8 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -39,7 +39,7 @@ pub use proof_provider::*; pub use sp_blockchain as blockchain; pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{ExecutionStrategy, StorageProof}; +pub use sp_state_machine::{CompactProof, ExecutionStrategy, StorageProof}; pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; /// Usage Information Provider interface diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 79444f0069232..75f9c55e134d2 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -17,8 +17,9 @@ // along with this program. If not, see . //! Proof utilities -use crate::{ChangesProof, StorageProof}; +use crate::{ChangesProof, CompactProof, StorageProof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_state_machine::{KeyValueStates, KeyValueStorageLevel}; use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey}; /// Interface for providing block proving utilities. @@ -71,31 +72,43 @@ pub trait ProofProvider { key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively, - /// building proofs until size limit is reached. Returns combined proof and the number of - /// collected keys. + /// Given a `BlockId`, iterate over all storage values starting at `start_keys`. + /// The last `start_keys` element contains the last accessed key value. + /// With multiple `start_keys`, the first element is the storage key of the + /// last accessed child trie, and the last element is the value to start at, + /// exclusively. + /// Proofs are built until the size limit is reached and always include at + /// least one key following `start_keys`. + /// Returns the combined proof and the number of collected keys. fn read_proof_collection( &self, id: &BlockId, - start_key: &[u8], + start_keys: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result<(StorageProof, u32)>; + ) -> sp_blockchain::Result<(CompactProof, u32)>; /// Given a `BlockId` iterate over all storage values starting at `start_key`. /// Returns collected keys and values. + /// Returns the collected key-value content of the top trie followed by the + /// collected key-value content of child tries.
+ /// Only child tries whose root is part of the collected content, or that + /// are related to `start_key`, are attached. + /// For each collected state, a boolean indicates whether the end of that + /// state was reached. fn storage_collection( &self, id: &BlockId, - start_key: &[u8], + start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result, Vec)>>; + ) -> sp_blockchain::Result>; /// Verify read storage proof for a set of keys. - /// Returns collected key-value pairs and a flag indicating if iteration is complete. + /// Returns collected key-value pairs and the nested state + /// depth of the current iteration, or 0 if completed. fn verify_range_proof( &self, root: Block::Hash, - proof: StorageProof, - start_key: &[u8], + proof: CompactProof, + start_keys: &[Vec], + ) -> sp_blockchain::Result<(KeyValueStates, usize)>; } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index d828e54bc7e3e..5294db2396042 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -133,7 +133,7 @@ pub struct ImportedState { /// Target block hash. pub block: B::Hash, /// State keys and values. - pub state: Vec<(Vec, Vec)>, + pub state: sp_state_machine::KeyValueStates, } impl std::fmt::Debug for ImportedState { diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index e644ba1013e4d..43aa1c4629f0e 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -23,9 +23,11 @@ use crate::{ }; use codec::{Decode, Encode}; use log::debug; -use sc_client_api::StorageProof; +use sc_client_api::CompactProof; +use smallvec::SmallVec; +use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; /// State sync support. pub struct StateSync { target_block: B::Hash, target_header: B::Header, target_root: B::Hash, - last_key: Vec, - state: Vec<(Vec, Vec)>, + last_key: SmallVec<[Vec; 2]>, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, client: Arc>, imported_bytes: u64, @@ -61,8 +63,8 @@ impl StateSync { target_block: target.hash(), target_root: target.state_root().clone(), target_header: target, - last_key: Vec::default(), - state: Vec::default(), + last_key: SmallVec::default(), + state: HashMap::default(), complete: false, imported_bytes: 0, skip_proof, } } /// Validate and import a state response.
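/// Returns `ImportResult::Import` once the whole target state has been received, `ImportResult::Continue` while more responses are needed, and `ImportResult::BadResponse` on a malformed or unverifiable response.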
pub fn import(&mut self, response: StateResponse) -> ImportResult { - if response.entries.is_empty() && response.proof.is_empty() && !response.complete { + if response.entries.is_empty() && response.proof.is_empty() { debug!(target: "sync", "Bad state response"); return ImportResult::BadResponse } @@ -82,56 +84,135 @@ impl StateSync { let complete = if !self.skip_proof { debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len()); let proof_size = response.proof.len() as u64; - let proof = match StorageProof::decode(&mut response.proof.as_ref()) { + let proof = match CompactProof::decode(&mut response.proof.as_ref()) { Ok(proof) => proof, Err(e) => { debug!(target: "sync", "Error decoding proof: {:?}", e); return ImportResult::BadResponse }, }; - let (values, complete) = - match self.client.verify_range_proof(self.target_root, proof, &self.last_key) { - Err(e) => { - debug!(target: "sync", "StateResponse failed proof verification: {:?}", e); - return ImportResult::BadResponse - }, - Ok(values) => values, - }; + let (values, completed) = match self.client.verify_range_proof( + self.target_root, + proof, + self.last_key.as_slice(), + ) { + Err(e) => { + debug!( + target: "sync", + "StateResponse failed proof verification: {:?}", + e, + ); + return ImportResult::BadResponse + }, + Ok(values) => values, + }; debug!(target: "sync", "Imported with {} keys", values.len()); - if let Some(last) = values.last().map(|(k, _)| k) { - self.last_key = last.clone(); - } + let complete = completed == 0; + if !complete && !values.update_last_key(completed, &mut self.last_key) { + debug!(target: "sync", "Error updating key cursor, depth: {}", completed); + }; - for (key, value) in values { - self.imported_bytes += key.len() as u64; - self.state.push((key, value)) + for values in values.0 { + let key_values = if values.state_root.is_empty() { + // Read child trie roots. + values + .key_values + .into_iter() + .filter(|key_value| { + if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { + self.state + .entry(key_value.1.clone()) + .or_default() + .1 + .push(key_value.0.clone()); + false + } else { + true + } + }) + .collect() + } else { + values.key_values + }; + let mut entry = self.state.entry(values.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported a child trie with the same root. + // Warning: this will not work with parallel download. + } else { + if entry.0.is_empty() { + for (key, _value) in key_values.iter() { + self.imported_bytes += key.len() as u64; + } + + entry.0 = key_values; + } else { + for (key, value) in key_values { + self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) + } + } + } } self.imported_bytes += proof_size; complete } else { + let mut complete = true; + // If the trie is a child trie and the parent trie content of the + // response is empty, the parent cursor stays valid. + // Empty parent trie content only happens when all the response content + // is part of a single child trie. + if self.last_key.len() == 2 && response.entries[0].entries.len() == 0 { + // Do not remove the parent trie position.
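+ // Only the child trie cursor is dropped below; the top trie key is
+ // kept as the resume point for the next request.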
+ self.last_key.pop(); + } else { + self.last_key.clear(); } for state in response.entries { + debug!( + target: "sync", + "Importing state from {:?} to {:?}", + state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if !state.complete { + if let Some(e) = state.entries.last() { + self.last_key.push(e.key.clone()); + } + complete = false; + } + let is_top = state.state_root.is_empty(); + let entry = self.state.entry(state.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported a child trie with the same root. + } else { + let mut child_roots = Vec::new(); + for StateEntry { key, value } in state.entries { + // Skip all child trie root keys (they will be recalculated on import). + if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { + child_roots.push((value, key)); + } else { + self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) + } + } + for (root, storage_key) in child_roots { + self.state.entry(root).or_default().1.push(storage_key); + } + } } - response.complete + complete }; if complete { self.complete = true; ImportResult::Import( self.target_block, self.target_header.clone(), - ImportedState { block: self.target_block, state: std::mem::take(&mut self.state) }, + ImportedState { + block: self.target_block.clone(), + state: std::mem::take(&mut self.state).into(), + }, ) } else { ImportResult::Continue } @@ -142,7 +223,7 @@ impl StateSync { pub fn next_request(&self) -> StateRequest { StateRequest { block: self.target_block.encode(), - start: self.last_key.clone(), + start: self.last_key.clone().into_vec(), no_proof: self.skip_proof, } } @@ -164,7 +245,8 @@ impl StateSync { /// Returns state sync estimated progress. pub fn progress(&self) -> StateDownloadProgress { - let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256; + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; StateDownloadProgress { percentage: percent_done, size: self.imported_bytes } } } diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index c5333c7dcdbf1..b51137d1d51d4 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -74,22 +74,32 @@ message BlockData { message StateRequest { // Block header hash. bytes block = 1; - // Start from this key. Equivalent to if omitted. - bytes start = 2; // optional + // Start from this key. + // Multiple keys used for nested state start. + repeated bytes start = 2; // optional // if 'true' indicates that response should contain raw key-values, rather than proof. bool no_proof = 3; } message StateResponse { - // A collection of keys-values. Only populated if `no_proof` is `true` - repeated StateEntry entries = 1; + // A collection of key-value states. Only populated if `no_proof` is `true` + repeated KeyValueStateEntry entries = 1; // If `no_proof` is false in request, this contains proof nodes. bytes proof = 2; +} + +// A key value state. +message KeyValueStateEntry { + // Root for this level; empty bytes + // if top level. + bytes state_root = 1; + // A collection of keys-values. + repeated StateEntry entries = 2; // Set to true when there are no more keys to return.
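// Only meaningful when the request set `no_proof`; with proofs, completion is derived during proof verification.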
bool complete = 3; } -// A key-value pair +// A key-value pair. message StateEntry { bytes key = 1; bytes value = 2; diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index d2e58ce955197..0d710c13af607 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -21,7 +21,7 @@ use crate::{ chain::Client, config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, - schema::v1::{StateEntry, StateRequest, StateResponse}, + schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse}, PeerId, ReputationChange, }; use codec::{Decode, Encode}; @@ -66,7 +66,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String { let mut s = String::new(); s.push_str("/"); s.push_str(protocol_id.as_ref()); - s.push_str("/state/1"); + s.push_str("/state/2"); s } @@ -75,7 +75,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String { struct SeenRequestsKey { peer: PeerId, block: B::Hash, - start: Vec, + start: Vec>, } #[allow(clippy::derive_hash_xor_eq)] @@ -169,10 +169,10 @@ impl StateRequestHandler { trace!( target: LOG_TARGET, - "Handling state request from {}: Block {:?}, Starting at {:?}, no_proof={}", + "Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}", peer, request.block, - sp_core::hexdisplay::HexDisplay::from(&request.start), + &request.start, request.no_proof, ); @@ -180,36 +180,45 @@ impl StateRequestHandler { let mut response = StateResponse::default(); if !request.no_proof { - let (proof, count) = self.client.read_proof_collection( + let (proof, _count) = self.client.read_proof_collection( &BlockId::hash(block), - &request.start, + request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); - if count == 0 { - response.complete = true; - } } else { let entries = self.client.storage_collection( &BlockId::hash(block), - &request.start, + request.start.as_slice(), MAX_RESPONSE_BYTES, )?; - response.entries = - entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); - if response.entries.is_empty() { - response.complete = true; - } + response.entries = entries + .into_iter() + .map(|(state, complete)| KeyValueStateEntry { + state_root: state.state_root, + entries: state + .key_values + .into_iter() + .map(|(key, value)| StateEntry { key, value }) + .collect(), + complete, + }) + .collect(); } trace!( target: LOG_TARGET, - "StateResponse contains {} keys, {}, proof nodes, complete={}, from {:?} to {:?}", + "StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}", response.entries.len(), response.proof.len(), - response.complete, - response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.get(0).and_then(|top| top + .entries + .first() + .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), + response.entries.get(0).and_then(|top| top + .entries + .last() + .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), ); if let Some(value) = self.seen_requests.get_mut(&key) { // If this is the first time we have processed this request, we need to change diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index fb0012aaf5baf..084b09fd65f8f 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -697,6 +697,8 @@ pub struct FullPeerConfig { pub is_authority: bool, /// Syncing mode pub 
sync_mode: SyncMode, + /// Extra genesis storage. + pub extra_storage: Option, /// Enable transaction indexing. pub storage_chain: bool, } @@ -765,6 +767,11 @@ where (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), (None, false) => TestClientBuilder::with_default_backend(), }; + if let Some(storage) = config.extra_storage { + let genesis_extra_storage = test_client_builder.genesis_init_mut().extra_storage(); + *genesis_extra_storage = storage; + } + if matches!(config.sync_mode, SyncMode::Fast { .. } | SyncMode::Warp) { test_client_builder = test_client_builder.set_no_genesis(); } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index f3af7f8ff6fc3..ff62b5476d1e6 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1110,11 +1110,44 @@ fn syncs_state() { sp_tracing::try_init_simple(); for skip_proofs in &[false, true] { let mut net = TestNet::new(0); - net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { - sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }, - ..Default::default() - }); + let mut genesis_storage: sp_core::storage::Storage = Default::default(); + genesis_storage.top.insert(b"additional_key".to_vec(), vec![1]); + let mut child_data: std::collections::BTreeMap, Vec> = Default::default(); + for i in 0u8..16 { + child_data.insert(vec![i; 5], vec![i; 33]); + } + let child1 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child1"), + }; + let child3 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child3"), + }; + for i in 22u8..33 { + child_data.insert(vec![i; 5], vec![i; 33]); + } + let child2 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child2"), + }; + genesis_storage + .children_default + .insert(child1.child_info.storage_key().to_vec(), child1); + genesis_storage + .children_default + .insert(child2.child_info.storage_key().to_vec(), child2); + genesis_storage + .children_default + .insert(child3.child_info.storage_key().to_vec(), child3); + let mut config_one = FullPeerConfig::default(); + config_one.extra_storage = Some(genesis_storage.clone()); + net.add_full_peer_with_config(config_one); + let mut config_two = FullPeerConfig::default(); + config_two.extra_storage = Some(genesis_storage); + config_two.sync_mode = + SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; + net.add_full_peer_with_config(config_two); net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. 
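+ // (The surrounding `skip_proofs` loop runs this scenario once with
+ // proof-based and once with proof-less fast sync.)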
net.block_until_sync(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 4e3cb0aaf234b..6ce2feb050759 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -64,7 +64,10 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ convert_hash, - storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, + storage::{ + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, + StorageKey, + }, ChangesTrieConfiguration, NativeOrEncoded, }; #[cfg(feature = "test-helpers")] @@ -78,11 +81,12 @@ use sp_runtime::{ BuildStorage, Justification, Justifications, }; use sp_state_machine::{ - key_changes, key_changes_proof, prove_child_read, prove_range_read_with_size, prove_read, - read_range_proof_check, Backend as StateBackend, ChangesTrieAnchorBlockId, - ChangesTrieConfigurationRange, ChangesTrieRootsStorage, ChangesTrieStorage, DBValue, + key_changes, key_changes_proof, prove_child_read, prove_range_read_with_child_with_size, + prove_read, read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + ChangesTrieStorage, DBValue, KeyValueStates, KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::StorageProof; +use sp_trie::{CompactProof, StorageProof}; use std::{ collections::{BTreeMap, HashMap, HashSet}, marker::PhantomData, @@ -824,10 +828,37 @@ where Some((main_sc, child_sc)) }, sc_consensus::StorageChanges::Import(changes) => { - let storage = sp_storage::Storage { - top: changes.state.into_iter().collect(), - children_default: Default::default(), - }; + let mut storage = sp_storage::Storage::default(); + for state in changes.state.0.into_iter() { + if state.parent_storage_keys.len() == 0 && state.state_root.len() == 0 { + for (key, value) in state.key_values.into_iter() { + storage.top.insert(key, value); + } + } else { + for parent_storage in state.parent_storage_keys { + let storage_key = PrefixedStorageKey::new_ref(&parent_storage); + let storage_key = + match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + storage_key, + None => + return Err(Error::Backend( + "Invalid child storage key.".to_string(), + )), + }; + let entry = storage + .children_default + .entry(storage_key.to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: ChildInfo::new_default(storage_key), + }); + for (key, value) in state.key_values.iter() { + entry.data.insert(key.clone(), value.clone()); + } + } + } + } let state_root = operation.op.reset_storage(storage)?; if state_root != *import_headers.post().state_root() { @@ -1347,62 +1378,153 @@ where fn read_proof_collection( &self, id: &BlockId, - start_key: &[u8], + start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result<(StorageProof, u32)> { + ) -> sp_blockchain::Result<(CompactProof, u32)> { let state = self.state_at(id)?; - Ok(prove_range_read_with_size::<_, HashFor>( - state, - None, - None, - size_limit, - Some(start_key), - )?) 
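+ // Prove the key range (descending into child tries), then compact-encode
+ // the proof against the state root to reduce the response size.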
+ let root = state.storage_root(std::iter::empty()).0; + + let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( + state, size_limit, start_key, + )?; + let proof = sp_trie::encode_compact::>>(proof, root) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; + Ok((proof, count)) } fn storage_collection( &self, id: &BlockId, - start_key: &[u8], + start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result, Vec)>> { + ) -> sp_blockchain::Result> { + if start_key.len() > MAX_NESTED_TRIE_DEPTH { + return Err(Error::Backend("Invalid start key.".to_string())) + } let state = self.state_at(id)?; - let mut current_key = start_key.to_vec(); - let mut total_size = 0; - let mut entries = Vec::new(); - while let Some(next_key) = state - .next_storage_key(¤t_key) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - { - let value = state - .storage(next_key.as_ref()) + let child_info = |storage_key: &Vec| -> sp_blockchain::Result { + let storage_key = PrefixedStorageKey::new_ref(&storage_key); + match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + Ok(ChildInfo::new_default(storage_key)), + None => Err(Error::Backend("Invalid child storage key.".to_string())), + } + }; + let mut current_child = if start_key.len() == 2 { + let start_key = start_key.get(0).expect("checked len"); + if let Some(child_root) = state + .storage(&start_key) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .unwrap_or_default(); - let size = value.len() + next_key.len(); - if total_size + size > size_limit && !entries.is_empty() { + { + Some((child_info(start_key)?, child_root)) + } else { + return Err(Error::Backend("Invalid root start key.".to_string())) + } + } else { + None + }; + let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new()); + let mut total_size = 0; + let mut result = vec![( + KeyValueStorageLevel { + state_root: Vec::new(), + key_values: Vec::new(), + parent_storage_keys: Vec::new(), + }, + false, + )]; + + let mut child_roots = HashSet::new(); + loop { + let mut entries = Vec::new(); + let mut complete = true; + let mut switch_child_key = None; + while let Some(next_key) = if let Some(child) = current_child.as_ref() { + state + .next_child_storage_key(&child.0, ¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + } else { + state + .next_storage_key(¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + } { + let value = if let Some(child) = current_child.as_ref() { + state + .child_storage(&child.0, next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .unwrap_or_default() + } else { + state + .storage(next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ .unwrap_or_default() + }; + let size = value.len() + next_key.len(); + if total_size + size > size_limit && !entries.is_empty() { + complete = false; + break + } + total_size += size; + + if current_child.is_none() && + sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) + { + if !child_roots.contains(value.as_slice()) { + child_roots.insert(value.clone()); + switch_child_key = Some((next_key.clone(), value.clone())); + entries.push((next_key.clone(), value)); + break + } + } + entries.push((next_key.clone(), value)); + current_key = next_key; + } + if let Some((child, child_root)) = switch_child_key.take() { + result[0].0.key_values.extend(entries.into_iter()); + current_child = Some((child_info(&child)?, child_root)); + current_key = Vec::new(); + } else if let Some((child, child_root)) = current_child.take() { + current_key = child.into_prefixed_storage_key().into_inner(); + result.push(( + KeyValueStorageLevel { + state_root: child_root, + key_values: entries, + parent_storage_keys: Vec::new(), + }, + complete, + )); + if !complete { + break + } + } else { + result[0].0.key_values.extend(entries.into_iter()); + result[0].1 = complete; break } - total_size += size; - entries.push((next_key.clone(), value)); - current_key = next_key; } - Ok(entries) + Ok(result) } fn verify_range_proof( &self, root: Block::Hash, - proof: StorageProof, - start_key: &[u8], - ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> { - Ok(read_range_proof_check::>( - root, - proof, - None, - None, - None, - Some(start_key), - )?) + proof: CompactProof, + start_key: &[Vec], + ) -> sp_blockchain::Result<(KeyValueStates, usize)> { + let mut db = sp_state_machine::MemoryDB::>::new(&[]); + let _ = sp_trie::decode_compact::>, _, _>( + &mut db, + proof.iter_compact_encoded_nodes(), + Some(&root), + ) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; + let proving_backend = sp_state_machine::TrieBackend::new(db, root); + let state = read_range_proof_check_with_child_on_proving_backend::>( + &proving_backend, + start_key, + )?; + + Ok(state) } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a724cf5c9a0b4..b0178021f3130 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -172,7 +172,7 @@ mod std_reexport { }; pub use sp_trie::{ trie_types::{Layout, TrieDBMut}, - DBValue, MemoryDB, StorageProof, TrieMut, + CompactProof, DBValue, MemoryDB, StorageProof, TrieMut, }; } @@ -181,15 +181,20 @@ mod execution { use super::*; use codec::{Codec, Decode, Encode}; use hash_db::Hasher; + use smallvec::SmallVec; use sp_core::{ hexdisplay::HexDisplay, - storage::ChildInfo, + storage::{ChildInfo, ChildType, PrefixedStorageKey}, traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; - use std::{collections::HashMap, fmt, panic::UnwindSafe, result}; - use tracing::{trace, warn}; + use std::{ + collections::{HashMap, HashSet}, + fmt, + panic::UnwindSafe, + result, + }; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions @@ -742,6 +747,254 @@ mod execution { prove_read_on_trie_backend(trie_backend, keys) } + /// State machine only allows a single level + /// of child trie. + pub const MAX_NESTED_TRIE_DEPTH: usize = 2; + + /// Multiple key value state. + /// States are ordered by root storage key. 
+ #[derive(PartialEq, Eq, Clone)]
+ pub struct KeyValueStates(pub Vec);
+
+ /// A key value state at any storage level.
+ #[derive(PartialEq, Eq, Clone)]
+ pub struct KeyValueStorageLevel {
+ /// State root of the level, for
+ /// top trie it is an empty byte array.
+ pub state_root: Vec,
+ /// Storage of parents, empty for top root or
+ /// when exporting (building proof).
+ pub parent_storage_keys: Vec>,
+ /// Pair of key and values from this state.
+ pub key_values: Vec<(Vec, Vec)>,
+ }
+
+ impl From for KeyValueStates
+ where
+ I: IntoIterator, (Vec<(Vec, Vec)>, Vec>))>,
+ {
+ fn from(b: I) -> Self {
+ let mut result = Vec::new();
+ for (state_root, (key_values, storage_paths)) in b.into_iter() {
+ result.push(KeyValueStorageLevel {
+ state_root,
+ key_values,
+ parent_storage_keys: storage_paths,
+ })
+ }
+ KeyValueStates(result)
+ }
+ }
+
+ impl KeyValueStates {
+ /// Return total number of key values in states.
+ pub fn len(&self) -> usize {
+ self.0.iter().fold(0, |nb, state| nb + state.key_values.len())
+ }
+
+ /// Update last keys accessed from this state.
+ pub fn update_last_key(
+ &self,
+ stopped_at: usize,
+ last: &mut SmallVec<[Vec; 2]>,
+ ) -> bool {
+ if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH {
+ return false
+ }
+ match stopped_at {
+ 1 => {
+ let top_last =
+ self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+ if let Some(top_last) = top_last {
+ match last.len() {
+ 0 => {
+ last.push(top_last);
+ return true
+ },
+ 2 => {
+ last.pop();
+ },
+ _ => (),
+ }
+ // update top trie access.
+ last[0] = top_last;
+ return true
+ } else {
+ // No change in top trie accesses.
+ // Indicates end of reading of a child trie.
+ last.truncate(1);
+ return true
+ }
+ },
+ 2 => {
+ let top_last =
+ self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+ let child_last =
+ self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+
+ if let Some(child_last) = child_last {
+ if last.len() == 0 {
+ if let Some(top_last) = top_last {
+ last.push(top_last)
+ } else {
+ return false
+ }
+ } else if let Some(top_last) = top_last {
+ last[0] = top_last;
+ }
+ if last.len() == 2 {
+ last.pop();
+ }
+ last.push(child_last);
+ return true
+ } else {
+ // stopped at level 2 so child last is defined.
+ return false
+ }
+ },
+ _ => (),
+ }
+ false
+ }
+ }
+
+ /// Generate range storage read proof, with child tries
+ /// content.
+ /// A size limit is applied to the proof with the
+ /// exception that `start_at` and its following element
+ /// are always part of the proof.
+ /// If a key different than `start_at` is a child trie root,
+ /// the child trie content will be included in the proof.
+ pub fn prove_range_read_with_child_with_size(
+ backend: B,
+ size_limit: usize,
+ start_at: &[Vec],
+ ) -> Result<(StorageProof, u32), Box>
+ where
+ B: Backend,
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ let trie_backend = backend
+ .as_trie_backend()
+ .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?;
+ prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at)
+ }
+
+ /// Generate range storage read proof, with child tries
+ /// content.
+ /// See `prove_range_read_with_child_with_size`.
+ pub fn prove_range_read_with_child_with_size_on_trie_backend( + trie_backend: &TrieBackend, + size_limit: usize, + start_at: &[Vec], + ) -> Result<(StorageProof, u32), Box> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + { + if start_at.len() > MAX_NESTED_TRIE_DEPTH { + return Err(Box::new("Invalid start of range.")) + } + + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let mut count = 0; + + let mut child_roots = HashSet::new(); + let (mut child_key, mut start_at) = if start_at.len() == 2 { + let storage_key = start_at.get(0).expect("Checked length.").clone(); + if let Some(state_root) = proving_backend + .storage(&storage_key) + .map_err(|e| Box::new(e) as Box)? + { + child_roots.insert(state_root.clone()); + } else { + return Err(Box::new("Invalid range start child trie key.")) + } + + (Some(storage_key), start_at.get(1).cloned()) + } else { + (None, start_at.get(0).cloned()) + }; + + loop { + let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { + let storage_key = PrefixedStorageKey::new_ref(storage_key); + ( + Some(match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(Box::new("Invalid range start child trie key.")), + }), + 2, + ) + } else { + (None, 1) + }; + + let start_at_ref = start_at.as_ref().map(AsRef::as_ref); + let mut switch_child_key = None; + let mut first = start_at.is_some(); + let completed = proving_backend + .apply_to_key_values_while( + child_info.as_ref(), + None, + start_at_ref, + |key, value| { + if first { + if start_at_ref + .as_ref() + .map(|start| &key.as_slice() > start) + .unwrap_or(true) + { + first = false; + } + } + if first { + true + } else if depth < MAX_NESTED_TRIE_DEPTH && + sp_core::storage::well_known_keys::is_child_storage_key( + key.as_slice(), + ) { + count += 1; + if !child_roots.contains(value.as_slice()) { + child_roots.insert(value); + switch_child_key = Some(key); + false + } else { + // do not add two child trie with same root + true + } + } else if proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + true + } else { + false + } + }, + false, + ) + .map_err(|e| Box::new(e) as Box)?; + + if switch_child_key.is_none() { + if depth == 1 { + break + } else { + if completed { + start_at = child_key.take(); + } else { + break + } + } + } else { + child_key = switch_child_key; + start_at = None; + } + } + Ok((proving_backend.extract_proof(), count)) + } + /// Generate range storage read proof. pub fn prove_range_read_with_size( backend: B, @@ -884,7 +1137,25 @@ mod execution { Ok(result) } - /// Check child storage range proof, generated by `prove_range_read` call. + /// Check storage range proof with child trie included, generated by + /// `prove_range_read_with_child_with_size` call. + /// + /// Returns key values contents and the depth of the pending state iteration + /// (0 if completed). + pub fn read_range_proof_check_with_child( + root: H::Out, + proof: StorageProof, + start_at: &[Vec], + ) -> Result<(KeyValueStates, usize), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + read_range_proof_check_with_child_on_proving_backend(&proving_backend, start_at) + } + + /// Check child storage range proof, generated by `prove_range_read_with_size` call. 
pub fn read_range_proof_check(
root: H::Out,
proof: StorageProof,
@@ -991,6 +1262,130 @@ mod execution {
Err(e) => Err(Box::new(e) as Box),
}
}
+
+ /// Check storage range proof on pre-created proving backend.
+ ///
+ /// See `read_range_proof_check_with_child`.
+ pub fn read_range_proof_check_with_child_on_proving_backend(
+ proving_backend: &TrieBackend, H>,
+ start_at: &[Vec],
+ ) -> Result<(KeyValueStates, usize), Box>
+ where
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ let mut result = vec![KeyValueStorageLevel {
+ state_root: Default::default(),
+ key_values: Default::default(),
+ parent_storage_keys: Default::default(),
+ }];
+ if start_at.len() > MAX_NESTED_TRIE_DEPTH {
+ return Err(Box::new("Invalid start of range."))
+ }
+
+ let mut child_roots = HashSet::new();
+ let (mut child_key, mut start_at) = if start_at.len() == 2 {
+ let storage_key = start_at.get(0).expect("Checked length.").clone();
+ let child_key = if let Some(state_root) = proving_backend
+ .storage(&storage_key)
+ .map_err(|e| Box::new(e) as Box)?
+ {
+ child_roots.insert(state_root.clone());
+ Some((storage_key, state_root))
+ } else {
+ return Err(Box::new("Invalid range start child trie key."))
+ };
+
+ (child_key, start_at.get(1).cloned())
+ } else {
+ (None, start_at.get(0).cloned())
+ };
+
+ let completed = loop {
+ let (child_info, depth) = if let Some((storage_key, state_root)) = child_key.as_ref() {
+ result.push(KeyValueStorageLevel {
+ state_root: state_root.clone(),
+ key_values: Default::default(),
+ parent_storage_keys: Default::default(),
+ });
+
+ let storage_key = PrefixedStorageKey::new_ref(storage_key);
+ (
+ Some(match ChildType::from_prefixed_key(&storage_key) {
+ Some((ChildType::ParentKeyId, storage_key)) =>
+ ChildInfo::new_default(storage_key),
+ None => return Err(Box::new("Invalid range start child trie key.")),
+ }),
+ 2,
+ )
+ } else {
+ (None, 1)
+ };
+
+ let values = if child_info.is_some() {
+ &mut result.last_mut().expect("Added above").key_values
+ } else {
+ &mut result[0].key_values
+ };
+ let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
+ let mut switch_child_key = None;
+ let mut first = start_at.is_some();
+ let completed = proving_backend
+ .apply_to_key_values_while(
+ child_info.as_ref(),
+ None,
+ start_at_ref,
+ |key, value| {
+ if first {
+ if start_at_ref
+ .as_ref()
+ .map(|start| &key.as_slice() > start)
+ .unwrap_or(true)
+ {
+ first = false;
+ }
+ }
+ if !first {
+ values.push((key.to_vec(), value.to_vec()));
+ }
+ if first {
+ true
+ } else if depth < MAX_NESTED_TRIE_DEPTH &&
+ sp_core::storage::well_known_keys::is_child_storage_key(
+ key.as_slice(),
+ ) {
+ if child_roots.contains(value.as_slice()) {
+ // Do not add two child trie with same root.
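+ // Returning `true` here keeps the iteration going instead of
+ // descending into the duplicate child trie.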
+ true + } else { + child_roots.insert(value.clone()); + switch_child_key = Some((key, value)); + false + } + } else { + true + } + }, + true, + ) + .map_err(|e| Box::new(e) as Box)?; + + if switch_child_key.is_none() { + if !completed { + break depth + } + if depth == 1 { + break 0 + } else { + start_at = child_key.take().map(|entry| entry.0); + } + } else { + child_key = switch_child_key; + start_at = None; + } + }; + Ok((KeyValueStates(result), completed)) + } } #[cfg(test)] @@ -1574,7 +1969,7 @@ mod tests { assert_eq!( local_result1.into_iter().collect::>(), - vec![(b"value3".to_vec(), Some(vec![142]))], + vec![(b"value3".to_vec(), Some(vec![142; 33]))], ); assert_eq!(local_result2.into_iter().collect::>(), vec![(b"value2".to_vec(), None)]); assert_eq!(local_result3.into_iter().collect::>(), vec![(b"dummy".to_vec(), None)]); @@ -1678,7 +2073,7 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); - // Alwasys contains at least some nodes. + // Always contains at least some nodes. assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); @@ -1723,6 +2118,45 @@ mod tests { assert_eq!(completed, true); } + #[test] + fn prove_range_with_child_works() { + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); + let trie_backend = remote_backend.as_trie_backend().unwrap(); + let max_iter = 1000; + let mut nb_loop = 0; + loop { + nb_loop += 1; + if max_iter == nb_loop { + panic!("Too many loop in prove range"); + } + let (proof, count) = prove_range_read_with_child_with_size_on_trie_backend( + trie_backend, + 1, + start_at.as_slice(), + ) + .unwrap(); + // Always contains at least some nodes. + assert!(proof.clone().into_memory_db::().drain().len() > 0); + assert!(count < 3); // when doing child we include parent and first child key. + + let (result, completed_depth) = read_range_proof_check_with_child::( + remote_root, + proof.clone(), + start_at.as_slice(), + ) + .unwrap(); + + if completed_depth == 0 { + break + } + assert!(result.update_last_key(completed_depth, &mut start_at)); + } + + assert_eq!(nb_loop, 10); + } + #[test] fn compact_multiple_child_trie() { // this root will be queried diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 7cb725a80503d..7f9a02e055251 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -281,8 +281,8 @@ pub mod tests { { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b"value3", &[142]).expect("insert failed"); - trie.insert(b"value4", &[124]).expect("insert failed"); + trie.insert(b"value3", &[142; 33]).expect("insert failed"); + trie.insert(b"value4", &[124; 33]).expect("insert failed"); }; { @@ -319,7 +319,7 @@ pub mod tests { test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") .unwrap(), - Some(vec![142u8]), + Some(vec![142u8; 33]), ); // Change cache entry to check that caching is active. 
test_trie diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index bcfe93b6f7975..da92e0f37983c 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -137,6 +137,11 @@ impl GenesisParameters { pub fn set_wasm_code(&mut self, code: Vec) { self.wasm_code = Some(code); } + + /// Access extra genesis storage. + pub fn extra_storage(&mut self) -> &mut Storage { + &mut self.extra_storage + } } impl substrate_test_client::GenesisInit for GenesisParameters { From 2e8bb09da2dfbc1a21c159e777dc1cf0bceba56d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sun, 7 Nov 2021 23:24:53 +0000 Subject: [PATCH 058/162] put `TryInto` and `TryFrom` in `sp_std::prelude` (#10183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Switch to Rust 2021 * Update trybuild to fix errors * half baked * fix * remove unused import * remove more warnings Co-authored-by: Bastian Köcher --- frame/assets/src/lib.rs | 2 +- frame/aura/src/lib.rs | 2 +- frame/authority-discovery/src/lib.rs | 2 -- frame/babe/src/lib.rs | 2 +- frame/bags-list/remote-tests/src/lib.rs | 2 +- frame/bags-list/remote-tests/src/sanity_check.rs | 2 +- frame/benchmarking/src/analysis.rs | 1 - frame/contracts/rpc/src/lib.rs | 1 - frame/contracts/src/benchmarking/code.rs | 2 +- frame/contracts/src/benchmarking/mod.rs | 2 +- frame/contracts/src/wasm/mod.rs | 1 - frame/democracy/src/conviction.rs | 2 +- frame/democracy/src/tests/lock_voting.rs | 1 - frame/democracy/src/vote.rs | 2 +- frame/election-provider-multi-phase/src/benchmarking.rs | 4 ---- frame/election-provider-multi-phase/src/helpers.rs | 2 +- frame/election-provider-multi-phase/src/lib.rs | 2 +- frame/election-provider-multi-phase/src/mock.rs | 2 +- frame/election-provider-multi-phase/src/unsigned.rs | 2 +- frame/identity/src/lib.rs | 2 +- frame/im-online/src/lib.rs | 1 - frame/multisig/src/benchmarking.rs | 1 - frame/randomness-collective-flip/src/lib.rs | 2 +- frame/session/src/lib.rs | 1 - frame/staking/reward-curve/src/log.rs | 2 -- frame/staking/reward-fn/src/lib.rs | 1 - frame/staking/src/benchmarking.rs | 1 - frame/support/procedural/src/pallet/parse/config.rs | 1 - frame/support/src/storage/bounded_btree_map.rs | 1 - frame/support/src/storage/bounded_vec.rs | 3 +-- frame/support/src/storage/mod.rs | 1 - frame/system/src/offchain.rs | 6 +----- frame/transaction-payment/rpc/src/lib.rs | 2 +- frame/transaction-payment/src/lib.rs | 1 - frame/uniques/src/impl_nonfungibles.rs | 2 +- frame/uniques/src/tests.rs | 2 +- primitives/std/src/lib.rs | 2 ++ utils/frame/benchmarking-cli/src/writer.rs | 1 - utils/frame/frame-utilities-cli/src/pallet_id.rs | 1 - 39 files changed, 22 insertions(+), 48 deletions(-) diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 4176242c8394a..dfc81eadeb530 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -157,7 +157,7 @@ use sp_runtime::{ }, ArithmeticError, TokenError, }; -use sp_std::{borrow::Borrow, convert::TryInto, prelude::*}; +use sp_std::{borrow::Borrow, prelude::*}; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index 4b5294835403a..a4e55f25df5f6 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -49,7 +49,7 @@ use sp_runtime::{ traits::{IsMember, Member, SaturatedConversion, Saturating, Zero}, RuntimeAppPublic, }; -use sp_std::{convert::TryFrom, vec::Vec}; +use 
sp_std::prelude::*; pub mod migrations; mod mock; diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 8fced0d18cff1..a6609860d7cf1 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -30,8 +30,6 @@ use frame_support::{ use sp_authority_discovery::AuthorityId; use sp_std::prelude::*; -use core::convert::TryFrom; - pub use pallet::*; #[frame_support::pallet] diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index c74bbf897ac7f..033d993f4e26d 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ weights::{Pays, Weight}, BoundedVec, WeakBoundedVec, }; -use sp_application_crypto::{Public, TryFrom}; +use sp_application_crypto::Public; use sp_runtime::{ generic::DigestItem, traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index e471c4c95bdbc..3d555eb1f1e35 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -17,7 +17,7 @@ //! Utilities for remote-testing pallet-bags-list. -use sp_std::convert::TryInto; +use sp_std::prelude::*; /// A common log target to use. pub const LOG_TARGET: &'static str = "runtime::bags-list::remote-tests"; diff --git a/frame/bags-list/remote-tests/src/sanity_check.rs b/frame/bags-list/remote-tests/src/sanity_check.rs index e5e9f45bac5f4..7282e7bad5e32 100644 --- a/frame/bags-list/remote-tests/src/sanity_check.rs +++ b/frame/bags-list/remote-tests/src/sanity_check.rs @@ -23,7 +23,7 @@ use frame_support::{ }; use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::traits::Block as BlockT; -use sp_std::convert::TryInto; +use sp_std::prelude::*; /// Execute the sanity check of the bags-list. pub async fn execute( diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 2bb20ebe2e7f8..5ffb6e93c8fc3 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -18,7 +18,6 @@ //! Tools for analyzing the benchmark results. use crate::BenchmarkResult; -use core::convert::TryFrom; use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use std::collections::BTreeMap; diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index e0796af056540..c82cf44d97c7f 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -32,7 +32,6 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, }; -use std::convert::{TryFrom, TryInto}; pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index b24005ec58699..6b90381e7d353 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -36,7 +36,7 @@ use pwasm_utils::parity_wasm::{ use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; -use sp_std::{borrow::ToOwned, convert::TryFrom, prelude::*}; +use sp_std::{borrow::ToOwned, prelude::*}; /// Pass to `create_code` in order to create a compiled `WasmModule`. 
/// diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 5c753c2d95558..665ec565c8e65 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -44,7 +44,7 @@ use sp_runtime::{ traits::{Bounded, Hash}, Perbill, }; -use sp_std::{convert::TryInto, default::Default, vec, vec::Vec}; +use sp_std::prelude::*; /// How many batches we do per API benchmark. const API_BENCHMARK_BATCHES: u32 = 20; diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 855cb6e45091f..565a424323ac6 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -1967,7 +1967,6 @@ mod tests { #[test] #[cfg(feature = "unstable-interface")] fn call_runtime_works() { - use std::convert::TryInto; let call = Call::System(frame_system::Call::remark { remark: b"Hello World".to_vec() }); let mut ext = MockExt::default(); let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index b4f24c93bb40f..59a5eccc616c3 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -24,7 +24,7 @@ use sp_runtime::{ traits::{Bounded, CheckedDiv, CheckedMul, Zero}, RuntimeDebug, }; -use sp_std::{convert::TryFrom, result::Result}; +use sp_std::{prelude::*, result::Result}; /// A value denoting the strength of conviction of a vote. #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo)] diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 8b80b39c14aab..2e1440d8f44bb 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -18,7 +18,6 @@ //! The tests for functionality concerning locking and lock-voting. use super::*; -use std::convert::TryFrom; fn aye(x: u8, balance: u64) -> AccountVote { AccountVote::Standard { diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index 03ca020ca0949..da74f7bd2fb64 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -24,7 +24,7 @@ use sp_runtime::{ traits::{Saturating, Zero}, RuntimeDebug, }; -use sp_std::{convert::TryFrom, prelude::*, result::Result}; +use sp_std::prelude::*; /// A number of lock periods, plus a vote, one way or the other. #[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 9648b8e0f2465..d9db6c3090994 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -26,10 +26,6 @@ use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; use sp_arithmetic::{per_things::Percent, traits::One}; use sp_npos_elections::IndexAssignment; use sp_runtime::InnerOf; -use sp_std::{ - boxed::Box, - convert::{TryFrom, TryInto}, -}; const SEED: u32 = 999; diff --git a/frame/election-provider-multi-phase/src/helpers.rs b/frame/election-provider-multi-phase/src/helpers.rs index 72b1b23f27f3c..98a14a93a25e0 100644 --- a/frame/election-provider-multi-phase/src/helpers.rs +++ b/frame/election-provider-multi-phase/src/helpers.rs @@ -18,7 +18,7 @@ //! Some helper functions/macros for this crate. 
use super::{Config, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight}; -use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; #[macro_export] macro_rules! log { diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index a7863fafa7747..80a13aa99fb70 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -255,7 +255,7 @@ use sp_runtime::{ }, DispatchError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, }; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 1a65316be1f10..fbde6ad991706 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -40,7 +40,7 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, PerU16, }; -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 0ed9b5427b1ec..1770f4343a0a4 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -35,7 +35,7 @@ use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, DispatchError, SaturatedConversion, }; -use sp_std::{boxed::Box, cmp::Ordering, convert::TryFrom, vec::Vec}; +use sp_std::{cmp::Ordering, prelude::*}; /// Storage key used to store the last block number at which offchain worker ran. 
pub(crate) const OFFCHAIN_LAST_BLOCK: &[u8] = b"parity/multi-phase-unsigned-election"; diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index a91381f1edd8b..4d86efd27e534 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -80,7 +80,7 @@ pub mod weights; use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero}; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index d76bbaaa2fd14..2c5a7633c3b4a 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -75,7 +75,6 @@ mod tests; pub mod weights; use codec::{Decode, Encode, MaxEncodedLen}; -use core::convert::TryFrom; use frame_support::{ traits::{ EstimateNextSessionRotation, Get, OneSessionHandler, ValidatorSet, diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 1390b6eebbe34..cb98d8954030d 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -20,7 +20,6 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use core::convert::TryInto; use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 1b1d5cb5cd823..a9abb2c9564df 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -71,7 +71,7 @@ use safe_mix::TripletMix; use codec::Encode; use frame_support::traits::Randomness; use sp_runtime::traits::{Hash, Saturating}; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; const RANDOM_MATERIAL_LEN: u32 = 81; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 7fe163e0dfeac..6779285ee3187 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -131,7 +131,6 @@ use sp_runtime::{ }; use sp_staking::SessionIndex; use sp_std::{ - convert::TryFrom, marker::PhantomData, ops::{Rem, Sub}, prelude::*, diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index c196aaaa31a93..248a1e3c36a6e 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -1,5 +1,3 @@ -use std::convert::TryInto; - /// Simple u32 power of 2 function - simply uses a bit shift macro_rules! pow2 { ($n:expr) => { diff --git a/frame/staking/reward-fn/src/lib.rs b/frame/staking/reward-fn/src/lib.rs index dd5e629b3984c..25e52051effa5 100644 --- a/frame/staking/reward-fn/src/lib.rs +++ b/frame/staking/reward-fn/src/lib.rs @@ -19,7 +19,6 @@ //! Useful function for inflation for nominated proof of stake. -use core::convert::TryFrom; use sp_arithmetic::{ biguint::BigUint, traits::{SaturatedConversion, Zero}, diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 220e8f1e6a24c..80630818de7e6 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -256,7 +256,6 @@ benchmarks! { } unbond { - use sp_std::convert::TryFrom; // clean up any existing state. 
clear_validators_and_nominators::(); diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 712c20ffc7b4c..9e0a0fd52cbfc 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -16,7 +16,6 @@ // limitations under the License. use super::helper; -use core::convert::TryFrom; use frame_support_procedural_tools::get_doc_literals; use quote::ToTokens; use syn::spanned::Spanned; diff --git a/frame/support/src/storage/bounded_btree_map.rs b/frame/support/src/storage/bounded_btree_map.rs index 404814cb81693..7a59206aeba0e 100644 --- a/frame/support/src/storage/bounded_btree_map.rs +++ b/frame/support/src/storage/bounded_btree_map.rs @@ -307,7 +307,6 @@ pub mod test { use super::*; use crate::Twox128; use sp_io::TestExternalities; - use sp_std::convert::TryInto; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs index e51c6cd734113..3b5e7bda1651c 100644 --- a/frame/support/src/storage/bounded_vec.rs +++ b/frame/support/src/storage/bounded_vec.rs @@ -28,7 +28,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use sp_std::{convert::TryFrom, marker::PhantomData, prelude::*}; +use sp_std::{marker::PhantomData, prelude::*}; /// A bounded vector. /// @@ -349,7 +349,6 @@ pub mod test { use super::*; use crate::Twox128; use sp_io::TestExternalities; - use sp_std::convert::TryInto; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 35552e08fef1e..69445932b869e 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -1400,7 +1400,6 @@ mod test { use super::*; use crate::{assert_ok, hash::Identity, Twox128}; use bounded_vec::BoundedVec; - use core::convert::{TryFrom, TryInto}; use generator::StorageValue as _; use sp_core::hashing::twox_128; use sp_io::TestExternalities; diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index ed758a2556b77..c4986e67319f0 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -62,11 +62,7 @@ use sp_runtime::{ app_crypto::RuntimeAppPublic, traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, }; -use sp_std::{ - collections::btree_set::BTreeSet, - convert::{TryFrom, TryInto}, - prelude::{Box, Vec}, -}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; /// Marker struct used to flag using all supported keys to sign a payload. pub struct ForAll {} diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 945156d12a6a4..78230ee6e468f 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -31,7 +31,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, MaybeDisplay}, }; -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; #[rpc] pub trait TransactionPaymentApi { diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 59d94a823723b..64cd5d5290635 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -347,7 +347,6 @@ pub mod pallet { // given weight == u64, we build multipliers from `diff` of two weight values, which can // at most be maximum block weight. Make sure that this can fit in a multiplier without // loss. 
- use sp_std::convert::TryInto; assert!( ::max_value() >= Multiplier::checked_from_integer( diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index e68d2d4deecda..5394f02160e3c 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -23,7 +23,7 @@ use frame_support::{ BoundedSlice, }; use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::convert::TryFrom; +use sp_std::prelude::*; impl, I: 'static> Inspect<::AccountId> for Pallet { type InstanceId = T::InstanceId; diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 8a4f978b7f4f5..d23d694e949e2 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -21,7 +21,7 @@ use super::*; use crate::mock::*; use frame_support::{assert_noop, assert_ok, traits::Currency}; use pallet_balances::Error as BalancesError; -use sp_std::convert::TryInto; +use sp_std::prelude::*; fn assets() -> Vec<(u64, u32, u32)> { let mut r: Vec<_> = Account::::iter().map(|x| x.0).collect(); diff --git a/primitives/std/src/lib.rs b/primitives/std/src/lib.rs index 3af4d07ac6297..3d112a18edd0f 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -98,6 +98,8 @@ pub mod prelude { boxed::Box, clone::Clone, cmp::{Eq, PartialEq, Reverse}, + convert::{TryFrom, TryInto}, + iter::IntoIterator, vec::Vec, }; diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index ede5b2d1355a7..e7e8b42fd8a6d 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -17,7 +17,6 @@ // Outputs benchmark results to Rust files that can be ingested by the runtime. -use core::convert::TryInto; use std::{ collections::{HashMap, HashSet}, fs, diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index d173f52b39cd8..e4acdb2182f03 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -24,7 +24,6 @@ use sc_cli::{ }; use sp_core::crypto::{unwrap_or_default_ss58_version, Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::AccountIdConversion; -use std::convert::{TryFrom, TryInto}; use structopt::StructOpt; /// The `palletid` command From 2cc0e23254acaa81c0890dd3fee734131aa7da82 Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Mon, 8 Nov 2021 07:29:51 +0800 Subject: [PATCH 059/162] Add serde support for `Slot` type (#10204) Add serde support for the `Slot` type as we'd like to use it directly in our RPC. 
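A minimal sketch of the intended round-trip, for illustration only (it assumes a `std` build, a `serde_json` dev-dependency that this patch does not add, and the pre-existing `From<u64>` conversion on `Slot`):

    use sp_consensus_slots::Slot;

    fn main() {
        // `From<u64>` is assumed from the existing `Slot` API.
        let slot = Slot::from(42u64);
        // The derived `Serialize` on a newtype struct emits the inner value
        // in self-describing formats such as JSON.
        let json = serde_json::to_string(&slot).expect("slot serializes");
        assert_eq!(json, "42");
        // The derived `Deserialize` reads it back.
        let round_trip: Slot = serde_json::from_str(&json).expect("slot deserializes");
        assert_eq!(round_trip, slot);
    }

Deriving the impls only under the `std` feature keeps the `no_std` runtime build unaffected.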
--- Cargo.lock | 1 + primitives/consensus/slots/Cargo.toml | 2 ++ primitives/consensus/slots/src/lib.rs | 1 + 3 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index eeb1012b1d1d3..63a2d26db34e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9325,6 +9325,7 @@ version = "0.10.0-dev" dependencies = [ "parity-scale-codec", "scale-info", + "serde", "sp-arithmetic", "sp-runtime", ] diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index ad83835e02706..9177157bd5ed7 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0", features = ["derive"], optional = true } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../runtime" } sp-arithmetic = { version = "4.0.0-dev", default-features = false, path = "../../arithmetic" } @@ -23,6 +24,7 @@ default = ["std"] std = [ "codec/std", "scale-info/std", + "serde", "sp-runtime/std", "sp-arithmetic/std", ] diff --git a/primitives/consensus/slots/src/lib.rs b/primitives/consensus/slots/src/lib.rs index 89b57dca83082..72b3c95068e39 100644 --- a/primitives/consensus/slots/src/lib.rs +++ b/primitives/consensus/slots/src/lib.rs @@ -24,6 +24,7 @@ use scale_info::TypeInfo; /// Unit type wrapper that represents a slot. #[derive(Debug, Encode, MaxEncodedLen, Decode, Eq, Clone, Copy, Default, Ord, TypeInfo)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct Slot(u64); impl core::ops::Deref for Slot { From a524cfd579b5cfa00ee7aea475a3a08cb42dc160 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Nov 2021 00:31:11 +0100 Subject: [PATCH 060/162] Bump lru from 0.6.6 to 0.7.0 (#10194) Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.6.6 to 0.7.0. - [Release notes](https://github.com/jeromefroe/lru-rs/releases) - [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/jeromefroe/lru-rs/compare/0.6.6...0.7.0) --- updated-dependencies: - dependency-name: lru dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 17 +++++++++++++---- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- primitives/blockchain/Cargo.toml | 2 +- 4 files changed, 16 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63a2d26db34e8..4794c1c0f5c41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3665,7 +3665,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "lru", + "lru 0.6.6", "minicbor", "rand 0.7.3", "smallvec 1.7.0", @@ -3999,6 +3999,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "lru" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c748cfe47cb8da225c37595b3108bea1c198c84aaae8ea0ba76d01dda9fc803" +dependencies = [ + "hashbrown 0.11.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -8203,7 +8212,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log 0.4.14", - "lru", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.8", @@ -8246,7 +8255,7 @@ dependencies = [ "futures-timer 3.0.2", "libp2p", "log 0.4.14", - "lru", + "lru 0.7.0", "quickcheck", "sc-network", "sp-runtime", @@ -9239,7 +9248,7 @@ version = "4.0.0-dev" dependencies = [ "futures 0.3.16", "log 0.4.14", - "lru", + "lru 0.7.0", "parity-scale-codec", "parking_lot 0.11.1", "sp-api", diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 1f0b5c313294d..e17d9601eafc3 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.9" futures-timer = "3.0.1" libp2p = { version = "0.39.1", default-features = false } log = "0.4.8" -lru = "0.6.6" +lru = "0.7.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 7c2905d5fe03a..19ae5dd97e425 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -36,7 +36,7 @@ hex = "0.4.0" ip_network = "0.4.0" linked-hash-map = "0.5.4" linked_hash_set = "0.1.3" -lru = "0.6.6" +lru = "0.7.0" log = "0.4.8" parking_lot = "0.11.1" pin-project = "1.0.8" diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 7af7807954ef1..de73af10966e1 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -lru = "0.6.6" +lru = "0.7.0" parking_lot = "0.11.1" thiserror = "1.0.21" futures = "0.3.9" From e1c30d92f5bda94a44ba461d914fd4b21fa7c07e Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 8 Nov 2021 10:42:48 +0100 Subject: [PATCH 061/162] Speedup block import (#10211) --- client/service/src/chain_ops/import_blocks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index a408a06a8170e..e4096afacdd3b 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -43,10 +43,10 @@ use std::{ }; /// Number of blocks we will add to the queue before waiting for the queue to catch up. 
-const MAX_PENDING_BLOCKS: u64 = 1_024; +const MAX_PENDING_BLOCKS: u64 = 10_000; /// Number of milliseconds to wait until next poll. -const DELAY_TIME: u64 = 2_000; +const DELAY_TIME: u64 = 200; /// Number of milliseconds that must have passed between two updates. const TIME_BETWEEN_UPDATES: u64 = 3_000; From 3b2ce548c6e00183ab12ab086859325164263584 Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 8 Nov 2021 21:52:11 +0900 Subject: [PATCH 062/162] Refactor `sp-sandbox`; make sure both sandbox executors are always tested (#10173) * sp-sandbox: convert executors into normal `mod`s instead of using `include!` * sp-sandbox: run `cargo fmt` on `host_executor.rs` * sp-sandbox: abstract away the executors behind traits * sp_sandbox: always compile both executors when possible * sc-executor: make sure all sandbox tests run on both sandbox executors * sc-executor: fix brainfart: actually call into the sandbox through the trait * sc-runtime-test: fix cargo fmt * sc-runtime-test: deduplicate executor-specific sandbox test entrypoints * sc-executor: test each sandbox executor in a separate test * cargo fmt (Github's conflict resolving thingy broke indentation) --- Cargo.lock | 1 + client/executor/runtime-test/Cargo.toml | 1 + client/executor/runtime-test/src/lib.rs | 172 +++++++++++------- client/executor/src/integration_tests/mod.rs | 47 +++++ .../executor/src/integration_tests/sandbox.rs | 108 +++++++---- frame/contracts/src/benchmarking/code.rs | 5 +- frame/contracts/src/benchmarking/sandbox.rs | 5 +- frame/contracts/src/wasm/mod.rs | 9 +- frame/contracts/src/wasm/runtime.rs | 9 +- .../sandbox/{ => src}/embedded_executor.rs | 40 ++-- primitives/sandbox/{ => src}/host_executor.rs | 91 +++++---- primitives/sandbox/src/lib.rs | 89 ++++----- 12 files changed, 350 insertions(+), 227 deletions(-) rename primitives/sandbox/{ => src}/embedded_executor.rs (90%) rename primitives/sandbox/{ => src}/host_executor.rs (78%) diff --git a/Cargo.lock b/Cargo.lock index 4794c1c0f5c41..4b6cc99426b1c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8429,6 +8429,7 @@ dependencies = [ name = "sc-runtime-test" version = "2.0.0" dependencies = [ + "paste 1.0.4", "sp-core", "sp-io", "sp-runtime", diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 1e3b5e926b964..2c82a9705ceeb 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,6 +19,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../.. sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } +paste = "1.0.4" [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 2b5699fa3f77a..3ea6e2d7ed1aa 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -30,7 +30,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; #[cfg(not(feature = "std"))] -use sp_sandbox::Value; +use sp_sandbox::{SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory, Value}; extern "C" { #[allow(dead_code)] @@ -183,61 +183,6 @@ sp_core::wasm_export_functions! 
{ ).as_ref().to_vec() } - fn test_sandbox(code: Vec) -> bool { - execute_sandboxed(&code, &[]).is_ok() - } - - fn test_sandbox_args(code: Vec) -> bool { - execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ], - ).is_ok() - } - - fn test_sandbox_return_val(code: Vec) -> bool { - let ok = match execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ) { - Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, - _ => false, - }; - - ok - } - - fn test_sandbox_instantiate(code: Vec) -> u8 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - Ok(_) => 0, - Err(sp_sandbox::Error::Module) => 1, - Err(sp_sandbox::Error::Execution) => 2, - Err(sp_sandbox::Error::OutOfBounds) => 3, - }; - - code - } - - fn test_sandbox_get_global_val(code: Vec) -> i64 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - i - } else { - return 20; - }; - - match instance.get_global_val("test_global") { - Some(sp_sandbox::Value::I64(val)) => val, - None => 30, - _ => 40, - } - } - fn test_offchain_index_set() { sp_io::offchain_index::set(b"k", b"v"); } @@ -408,15 +353,112 @@ mod tasks { } } +/// A macro to define a test entrypoint for each available sandbox executor. +macro_rules! wasm_export_sandbox_test_functions { + ( + $( + fn $name:ident( + $( $arg_name:ident: $arg_ty:ty ),* $(,)? + ) $( -> $ret_ty:ty )? where T: SandboxInstance<$state:ty> $(,)? + { $( $fn_impl:tt )* } + )* + ) => { + $( + #[cfg(not(feature = "std"))] + fn $name( $($arg_name: $arg_ty),* ) $( -> $ret_ty )? where T: SandboxInstance<$state> { + $( $fn_impl )* + } + + paste::paste! { + sp_core::wasm_export_functions! { + fn [<$name _host>]( $($arg_name: $arg_ty),* ) $( -> $ret_ty )? { + $name::>( $( $arg_name ),* ) + } + + fn [<$name _embedded>]( $($arg_name: $arg_ty),* ) $( -> $ret_ty )? { + $name::>( $( $arg_name ),* ) + } + } + } + )* + }; +} + +wasm_export_sandbox_test_functions! 
{ + fn test_sandbox(code: Vec) -> bool + where + T: SandboxInstance, + { + execute_sandboxed::(&code, &[]).is_ok() + } + + fn test_sandbox_args(code: Vec) -> bool + where + T: SandboxInstance, + { + execute_sandboxed::(&code, &[Value::I32(0x12345678), Value::I64(0x1234567887654321)]) + .is_ok() + } + + fn test_sandbox_return_val(code: Vec) -> bool + where + T: SandboxInstance, + { + let ok = match execute_sandboxed::(&code, &[Value::I32(0x1336)]) { + Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, + _ => false, + }; + + ok + } + + fn test_sandbox_instantiate(code: Vec) -> u8 + where + T: SandboxInstance<()>, + { + let env_builder = T::EnvironmentBuilder::new(); + let code = match T::new(&code, &env_builder, &mut ()) { + Ok(_) => 0, + Err(sp_sandbox::Error::Module) => 1, + Err(sp_sandbox::Error::Execution) => 2, + Err(sp_sandbox::Error::OutOfBounds) => 3, + }; + + code + } + + fn test_sandbox_get_global_val(code: Vec) -> i64 + where + T: SandboxInstance<()>, + { + let env_builder = T::EnvironmentBuilder::new(); + let instance = if let Ok(i) = T::new(&code, &env_builder, &mut ()) { + i + } else { + return 20 + }; + + match instance.get_global_val("test_global") { + Some(sp_sandbox::Value::I64(val)) => val, + None => 30, + _ => 40, + } + } +} + #[cfg(not(feature = "std"))] -fn execute_sandboxed( +struct State { + counter: u32, +} + +#[cfg(not(feature = "std"))] +fn execute_sandboxed( code: &[u8], args: &[Value], -) -> Result { - struct State { - counter: u32, - } - +) -> Result +where + T: sp_sandbox::SandboxInstance, +{ fn env_assert( _e: &mut State, args: &[Value], @@ -446,10 +488,10 @@ fn execute_sandboxed( let mut state = State { counter: 0 }; let env_builder = { - let mut env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let mut env_builder = T::EnvironmentBuilder::new(); env_builder.add_host_func("env", "assert", env_assert); env_builder.add_host_func("env", "inc_counter", env_inc_counter); - let memory = match sp_sandbox::Memory::new(1, Some(16)) { + let memory = match T::Memory::new(1, Some(16)) { Ok(m) => m, Err(_) => unreachable!( " @@ -462,7 +504,7 @@ fn execute_sandboxed( env_builder }; - let mut instance = sp_sandbox::Instance::new(code, &env_builder, &mut state)?; + let mut instance = T::new(code, &env_builder, &mut state)?; let result = instance.invoke("call", args, &mut state); result.map_err(|_| sp_sandbox::HostError) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index fe964f47ba374..1cded769c6856 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -71,6 +71,53 @@ macro_rules! test_wasm_execution { }; } +/// A macro to run a given test for each available WASM execution method *and* for each +/// sandbox execution method. +#[macro_export] +macro_rules! test_wasm_execution_sandbox { + ($method_name:ident) => { + paste::item! 
{ + #[test] + fn [<$method_name _interpreted_host_executor>]() { + $method_name(WasmExecutionMethod::Interpreted, "_host"); + } + + #[test] + fn [<$method_name _interpreted_embedded_executor>]() { + $method_name(WasmExecutionMethod::Interpreted, "_embedded"); + } + + #[test] + #[cfg(feature = "wasmtime")] + fn [<$method_name _compiled_host_executor>]() { + $method_name(WasmExecutionMethod::Compiled, "_host"); + } + + #[test] + #[cfg(feature = "wasmtime")] + fn [<$method_name _compiled_embedded_executor>]() { + $method_name(WasmExecutionMethod::Compiled, "_embedded"); + } + } + }; + + (interpreted_only $method_name:ident) => { + paste::item! { + #[test] + fn [<$method_name _interpreted_host_executor>]() { + $method_name(WasmExecutionMethod::Interpreted, "_host"); + } + } + + paste::item! { + #[test] + fn [<$method_name _interpreted_embedded_executor>]() { + $method_name(WasmExecutionMethod::Interpreted, "_embedded"); + } + } + }; +} + fn call_in_wasm( function: &str, call_data: &[u8], diff --git a/client/executor/src/integration_tests/sandbox.rs b/client/executor/src/integration_tests/sandbox.rs index aacd493297cc8..2b536f541f088 100644 --- a/client/executor/src/integration_tests/sandbox.rs +++ b/client/executor/src/integration_tests/sandbox.rs @@ -17,12 +17,12 @@ // along with this program. If not, see . use super::{call_in_wasm, TestExternalities}; -use crate::{test_wasm_execution, WasmExecutionMethod}; +use crate::{test_wasm_execution_sandbox, WasmExecutionMethod}; use codec::Encode; -test_wasm_execution!(sandbox_should_work); -fn sandbox_should_work(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(sandbox_should_work); +fn sandbox_should_work(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -51,11 +51,14 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { .unwrap() .encode(); - assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), true.encode()); + assert_eq!( + call_in_wasm(&format!("test_sandbox{}", fn_suffix), &code, wasm_method, &mut ext).unwrap(), + true.encode() + ); } -test_wasm_execution!(sandbox_trap); -fn sandbox_trap(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(sandbox_trap); +fn sandbox_trap(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -72,11 +75,14 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { ) .unwrap(); - assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), vec![0]); + assert_eq!( + call_in_wasm(&format!("test_sandbox{}", fn_suffix), &code, wasm_method, &mut ext).unwrap(), + vec![0] + ); } -test_wasm_execution!(start_called); -fn start_called(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(start_called); +fn start_called(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -111,11 +117,14 @@ fn start_called(wasm_method: WasmExecutionMethod) { .unwrap() .encode(); - assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext).unwrap(), true.encode()); + assert_eq!( + call_in_wasm(&format!("test_sandbox{}", fn_suffix), &code, wasm_method, &mut ext).unwrap(), + true.encode() + ); } -test_wasm_execution!(invoke_args); -fn invoke_args(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(invoke_args); +fn invoke_args(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = 
TestExternalities::default(); let mut ext = ext.ext(); @@ -147,13 +156,14 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_args", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm(&format!("test_sandbox_args{}", fn_suffix), &code, wasm_method, &mut ext,) + .unwrap(), true.encode(), ); } -test_wasm_execution!(return_val); -fn return_val(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(return_val); +fn return_val(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -173,13 +183,19 @@ fn return_val(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_return_val", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_return_val{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), true.encode(), ); } -test_wasm_execution!(unlinkable_module); -fn unlinkable_module(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(unlinkable_module); +fn unlinkable_module(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -197,13 +213,19 @@ fn unlinkable_module(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_instantiate{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), 1u8.encode(), ); } -test_wasm_execution!(corrupted_module); -fn corrupted_module(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(corrupted_module); +fn corrupted_module(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -211,13 +233,19 @@ fn corrupted_module(wasm_method: WasmExecutionMethod) { let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); assert_eq!( - call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_instantiate{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), 1u8.encode(), ); } -test_wasm_execution!(start_fn_ok); -fn start_fn_ok(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(start_fn_ok); +fn start_fn_ok(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -238,13 +266,19 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_instantiate{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), 0u8.encode(), ); } -test_wasm_execution!(start_fn_traps); -fn start_fn_traps(wasm_method: WasmExecutionMethod) { +test_wasm_execution_sandbox!(start_fn_traps); +fn start_fn_traps(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -266,13 +300,19 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_instantiate{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), 2u8.encode(), ); } -test_wasm_execution!(get_global_val_works); -fn get_global_val_works(wasm_method: WasmExecutionMethod) { 
+test_wasm_execution_sandbox!(get_global_val_works); +fn get_global_val_works(wasm_method: WasmExecutionMethod, fn_suffix: &str) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); @@ -287,7 +327,13 @@ fn get_global_val_works(wasm_method: WasmExecutionMethod) { .encode(); assert_eq!( - call_in_wasm("test_sandbox_get_global_val", &code, wasm_method, &mut ext,).unwrap(), + call_in_wasm( + &format!("test_sandbox_get_global_val{}", fn_suffix), + &code, + wasm_method, + &mut ext, + ) + .unwrap(), 500i64.encode(), ); } diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 6b90381e7d353..98f52f4719a61 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -35,7 +35,10 @@ use pwasm_utils::parity_wasm::{ }; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; -use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; +use sp_sandbox::{ + default_executor::{EnvironmentDefinitionBuilder, Memory}, + SandboxEnvironmentBuilder, SandboxMemory, +}; use sp_std::{borrow::ToOwned, prelude::*}; /// Pass to `create_code` in order to create a compiled `WasmModule`. diff --git a/frame/contracts/src/benchmarking/sandbox.rs b/frame/contracts/src/benchmarking/sandbox.rs index 320ac90cce64e..4412542b547df 100644 --- a/frame/contracts/src/benchmarking/sandbox.rs +++ b/frame/contracts/src/benchmarking/sandbox.rs @@ -20,7 +20,10 @@ /// ! environment that provides the seal interface as imported functions. use super::{code::WasmModule, Config}; use sp_core::crypto::UncheckedFrom; -use sp_sandbox::{EnvironmentDefinitionBuilder, Instance, Memory}; +use sp_sandbox::{ + default_executor::{EnvironmentDefinitionBuilder, Instance, Memory}, + SandboxEnvironmentBuilder, SandboxInstance, +}; /// Minimal execution environment without any exported functions. pub struct Sandbox { diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 565a424323ac6..10aa0d19a04f7 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -36,6 +36,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::dispatch::DispatchError; use sp_core::crypto::UncheckedFrom; +use sp_sandbox::{SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory}; use sp_std::prelude::*; #[cfg(test)] pub use tests::MockExt; @@ -182,8 +183,8 @@ where function: &ExportedFunction, input_data: Vec, ) -> ExecResult { - let memory = - sp_sandbox::Memory::new(self.initial, Some(self.maximum)).unwrap_or_else(|_| { + let memory = sp_sandbox::default_executor::Memory::new(self.initial, Some(self.maximum)) + .unwrap_or_else(|_| { // unlike `.expect`, explicit panic preserves the source location. // Needed as we can't use `RUST_BACKTRACE` in here. panic!( @@ -193,7 +194,7 @@ where ) }); - let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let mut imports = sp_sandbox::default_executor::EnvironmentDefinitionBuilder::new(); imports.add_memory(self::prepare::IMPORT_MODULE_MEMORY, "memory", memory.clone()); runtime::Env::impls(&mut |module, name, func_ptr| { imports.add_host_func(module, name, func_ptr); @@ -209,7 +210,7 @@ where // Instantiate the instance from the instrumented module code and invoke the contract // entrypoint. 
- let result = sp_sandbox::Instance::new(&code, &imports, &mut runtime) + let result = sp_sandbox::default_executor::Instance::new(&code, &imports, &mut runtime) .and_then(|mut instance| instance.invoke(function.identifier(), &[], &mut runtime)); runtime.to_execution_result(result) diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 52b864bf18eac..883dfd0802483 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -32,6 +32,7 @@ use pwasm_utils::parity_wasm::elements::ValueType; use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::Bounded; +use sp_sandbox::SandboxMemory; use sp_std::prelude::*; /// Every error that can be returned to a contract when it calls any of the host functions. @@ -357,7 +358,7 @@ fn already_charged(_: u32) -> Option { pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, input_data: Option>, - memory: sp_sandbox::Memory, + memory: sp_sandbox::default_executor::Memory, trap_reason: Option, } @@ -367,7 +368,11 @@ where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { - pub fn new(ext: &'a mut E, input_data: Vec, memory: sp_sandbox::Memory) -> Self { + pub fn new( + ext: &'a mut E, + input_data: Vec, + memory: sp_sandbox::default_executor::Memory, + ) -> Self { Runtime { ext, input_data: Some(input_data), memory, trap_reason: None } } diff --git a/primitives/sandbox/embedded_executor.rs b/primitives/sandbox/src/embedded_executor.rs similarity index 90% rename from primitives/sandbox/embedded_executor.rs rename to primitives/sandbox/src/embedded_executor.rs index 678da3c3aeaf5..c521ff2cb63fb 100755 --- a/primitives/sandbox/embedded_executor.rs +++ b/primitives/sandbox/src/embedded_executor.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! An embedded WASM executor utilizing `wasmi`. + use super::{Error, HostError, HostFuncType, ReturnValue, Value, TARGET}; use alloc::string::String; use log::debug; @@ -27,13 +29,14 @@ use wasmi::{ RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind, }; +/// The linear memory used by the sandbox. #[derive(Clone)] pub struct Memory { memref: MemoryRef, } -impl Memory { - pub fn new(initial: u32, maximum: Option) -> Result { +impl super::SandboxMemory for Memory { + fn new(initial: u32, maximum: Option) -> Result { Ok(Memory { memref: MemoryInstance::alloc( Pages(initial as usize), @@ -43,12 +46,12 @@ impl Memory { }) } - pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { + fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { self.memref.get_into(ptr, buf).map_err(|_| Error::OutOfBounds)?; Ok(()) } - pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { + fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { self.memref.set(ptr, value).map_err(|_| Error::OutOfBounds)?; Ok(()) } @@ -118,20 +121,21 @@ enum ExternVal { Memory(Memory), } +/// A builder for the environment of the sandboxed WASM module. 
pub struct EnvironmentDefinitionBuilder { map: BTreeMap<(Vec, Vec), ExternVal>, defined_host_functions: DefinedHostFunctions, } -impl EnvironmentDefinitionBuilder { - pub fn new() -> EnvironmentDefinitionBuilder { +impl super::SandboxEnvironmentBuilder for EnvironmentDefinitionBuilder { + fn new() -> EnvironmentDefinitionBuilder { EnvironmentDefinitionBuilder { map: BTreeMap::new(), defined_host_functions: DefinedHostFunctions::new(), } } - pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) + fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) where N1: Into>, N2: Into>, @@ -140,7 +144,7 @@ impl EnvironmentDefinitionBuilder { self.map.insert((module.into(), field.into()), ExternVal::HostFunc(idx)); } - pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) + fn add_memory(&mut self, module: N1, field: N2, mem: Memory) where N1: Into>, N2: Into>, @@ -213,14 +217,18 @@ impl ImportResolver for EnvironmentDefinitionBuilder { } } +/// Sandboxed instance of a WASM module. pub struct Instance { instance: ModuleRef, defined_host_functions: DefinedHostFunctions, _marker: PhantomData, } -impl Instance { - pub fn new( +impl super::SandboxInstance for Instance { + type Memory = Memory; + type EnvironmentBuilder = EnvironmentDefinitionBuilder; + + fn new( code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T, @@ -241,12 +249,7 @@ impl Instance { Ok(Instance { instance, defined_host_functions, _marker: PhantomData:: }) } - pub fn invoke( - &mut self, - name: &str, - args: &[Value], - state: &mut T, - ) -> Result { + fn invoke(&mut self, name: &str, args: &[Value], state: &mut T) -> Result { let args = args.iter().cloned().map(to_wasmi).collect::>(); let mut externals = @@ -260,7 +263,7 @@ impl Instance { } } - pub fn get_global_val(&self, name: &str) -> Option { + fn get_global_val(&self, name: &str) -> Option { let global = self.instance.export_by_name(name)?.as_global()?.get(); Some(to_interface(global)) @@ -289,7 +292,8 @@ fn to_interface(value: RuntimeValue) -> Value { #[cfg(test)] mod tests { - use crate::{EnvironmentDefinitionBuilder, Error, HostError, Instance, ReturnValue, Value}; + use super::{EnvironmentDefinitionBuilder, Instance}; + use crate::{Error, HostError, ReturnValue, SandboxEnvironmentBuilder, SandboxInstance, Value}; use assert_matches::assert_matches; fn execute_sandboxed(code: &[u8], args: &[Value]) -> Result { diff --git a/primitives/sandbox/host_executor.rs b/primitives/sandbox/src/host_executor.rs similarity index 78% rename from primitives/sandbox/host_executor.rs rename to primitives/sandbox/src/host_executor.rs index d2836e2ffd1eb..43484dd66a00d 100755 --- a/primitives/sandbox/host_executor.rs +++ b/primitives/sandbox/src/host_executor.rs @@ -15,15 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! A WASM executor utilizing the sandbox runtime interface of the host. + +use super::{Error, HostFuncType, ReturnValue, Value}; use codec::{Decode, Encode}; use sp_core::sandbox as sandbox_primitives; use sp_io::sandbox; -use sp_std::{prelude::*, slice, marker, mem, vec, rc::Rc}; -use super::{Error, Value, ReturnValue, HostFuncType}; +use sp_std::{marker, mem, prelude::*, rc::Rc, slice, vec}; mod ffi { - use sp_std::mem; use super::HostFuncType; + use sp_std::mem; /// Index into the default table that points to a `HostFuncType`. 
pub type HostFuncIndex = usize; @@ -38,8 +40,9 @@ mod ffi { pub unsafe fn coerce_host_index_to_func(idx: HostFuncIndex) -> HostFuncType { // We need to ensure that sizes of a callable function pointer and host function index is // indeed equal. - // We can't use `static_assertions` create because it makes compiler panic, fallback to runtime assert. - // const_assert!(mem::size_of::() == mem::size_of::>()); + // We can't use `static_assertions` create because it makes compiler panic, fallback to + // runtime assert. const_assert!(mem::size_of::() == + // mem::size_of::>()); assert!(mem::size_of::() == mem::size_of::>()); mem::transmute::>(idx) } @@ -55,6 +58,7 @@ impl Drop for MemoryHandle { } } +/// The linear memory used by the sandbox. #[derive(Clone)] pub struct Memory { // Handle to memory instance is wrapped to add reference-counting semantics @@ -62,29 +66,20 @@ pub struct Memory { handle: Rc, } -impl Memory { - pub fn new(initial: u32, maximum: Option) -> Result { - let maximum = if let Some(maximum) = maximum { - maximum - } else { - sandbox_primitives::MEM_UNLIMITED - }; +impl super::SandboxMemory for Memory { + fn new(initial: u32, maximum: Option) -> Result { + let maximum = + if let Some(maximum) = maximum { maximum } else { sandbox_primitives::MEM_UNLIMITED }; match sandbox::memory_new(initial, maximum) { sandbox_primitives::ERR_MODULE => Err(Error::Module), - memory_idx => Ok(Memory { - handle: Rc::new(MemoryHandle { memory_idx, }), - }), + memory_idx => Ok(Memory { handle: Rc::new(MemoryHandle { memory_idx }) }), } } - pub fn get(&self, offset: u32, buf: &mut [u8]) -> Result<(), Error> { - let result = sandbox::memory_get( - self.handle.memory_idx, - offset, - buf.as_mut_ptr(), - buf.len() as u32, - ); + fn get(&self, offset: u32, buf: &mut [u8]) -> Result<(), Error> { + let result = + sandbox::memory_get(self.handle.memory_idx, offset, buf.as_mut_ptr(), buf.len() as u32); match result { sandbox_primitives::ERR_OK => Ok(()), sandbox_primitives::ERR_OUT_OF_BOUNDS => Err(Error::OutOfBounds), @@ -92,11 +87,11 @@ impl Memory { } } - pub fn set(&self, offset: u32, val: &[u8]) -> Result<(), Error> { + fn set(&self, offset: u32, val: &[u8]) -> Result<(), Error> { let result = sandbox::memory_set( self.handle.memory_idx, offset, - val.as_ptr() as _ , + val.as_ptr() as _, val.len() as u32, ); match result { @@ -107,6 +102,7 @@ impl Memory { } } +/// A builder for the environment of the sandboxed WASM module. 
pub struct EnvironmentDefinitionBuilder { env_def: sandbox_primitives::EnvironmentDefinition, retained_memories: Vec, @@ -114,16 +110,6 @@ pub struct EnvironmentDefinitionBuilder { } impl EnvironmentDefinitionBuilder { - pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { - env_def: sandbox_primitives::EnvironmentDefinition { - entries: Vec::new(), - }, - retained_memories: Vec::new(), - _marker: marker::PhantomData::, - } - } - fn add_entry( &mut self, module: N1, @@ -140,8 +126,18 @@ impl EnvironmentDefinitionBuilder { }; self.env_def.entries.push(entry); } +} - pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) +impl super::SandboxEnvironmentBuilder for EnvironmentDefinitionBuilder { + fn new() -> EnvironmentDefinitionBuilder { + EnvironmentDefinitionBuilder { + env_def: sandbox_primitives::EnvironmentDefinition { entries: Vec::new() }, + retained_memories: Vec::new(), + _marker: marker::PhantomData::, + } + } + + fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) where N1: Into>, N2: Into>, @@ -150,7 +146,7 @@ impl EnvironmentDefinitionBuilder { self.add_entry(module, field, f); } - pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) + fn add_memory(&mut self, module: N1, field: N2, mem: Memory) where N1: Into>, N2: Into>, @@ -163,6 +159,7 @@ impl EnvironmentDefinitionBuilder { } } +/// Sandboxed instance of a WASM module. pub struct Instance { instance_idx: u32, _retained_memories: Vec, @@ -211,8 +208,11 @@ extern "C" fn dispatch_thunk( } } -impl Instance { - pub fn new( +impl super::SandboxInstance for Instance { + type Memory = Memory; + type EnvironmentBuilder = EnvironmentDefinitionBuilder; + + fn new( code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T, @@ -242,12 +242,7 @@ impl Instance { }) } - pub fn invoke( - &mut self, - name: &str, - args: &[Value], - state: &mut T, - ) -> Result { + fn invoke(&mut self, name: &str, args: &[Value], state: &mut T) -> Result { let serialized_args = args.to_vec().encode(); let mut return_val = vec![0u8; ReturnValue::ENCODED_MAX_SIZE]; @@ -262,16 +257,16 @@ impl Instance { match result { sandbox_primitives::ERR_OK => { - let return_val = ReturnValue::decode(&mut &return_val[..]) - .map_err(|_| Error::Execution)?; + let return_val = + ReturnValue::decode(&mut &return_val[..]).map_err(|_| Error::Execution)?; Ok(return_val) - } + }, sandbox_primitives::ERR_EXECUTION => Err(Error::Execution), _ => unreachable!(), } } - pub fn get_global_val(&self, name: &str) -> Option { + fn get_global_val(&self, name: &str) -> Option { sandbox::get_global_val(self.instance_idx, name) } } diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index 1724b4152ff3d..f1a24732b7a0a 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -48,13 +48,15 @@ pub use sp_wasm_interface::{ReturnValue, Value}; /// The target used for logging. 
const TARGET: &str = "runtime::sandbox"; -mod imp { - #[cfg(all(feature = "wasmer-sandbox", not(feature = "std")))] - include!("../host_executor.rs"); +pub mod embedded_executor; +#[cfg(not(feature = "std"))] +pub mod host_executor; - #[cfg(not(all(feature = "wasmer-sandbox", not(feature = "std"))))] - include!("../embedded_executor.rs"); -} +#[cfg(all(feature = "wasmer-sandbox", not(feature = "std")))] +pub use host_executor as default_executor; + +#[cfg(not(all(feature = "wasmer-sandbox", not(feature = "std"))))] +pub use embedded_executor as default_executor; /// Error that can occur while using this crate. #[derive(sp_core::RuntimeDebug)] @@ -88,12 +90,7 @@ pub type HostFuncType = fn(&mut T, &[Value]) -> Result) -> Result { - Ok(Memory { inner: imp::Memory::new(initial, maximum)? }) - } + fn new(initial: u32, maximum: Option) -> Result; /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. /// /// Returns `Err` if the range is out-of-bounds. - pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { - self.inner.get(ptr, buf) - } + fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error>; /// Write a memory area at the address `ptr` with contents of the provided slice `buf`. /// /// Returns `Err` if the range is out-of-bounds. - pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { - self.inner.set(ptr, value) - } + fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error>; } /// Struct that can be used for defining an environment for a sandboxed module. /// /// The sandboxed module can access only the entities which were defined and passed /// to the module at the instantiation time. -pub struct EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder, -} - -impl EnvironmentDefinitionBuilder { +pub trait SandboxEnvironmentBuilder: Sized { /// Construct a new `EnvironmentDefinitionBuilder`. - pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { inner: imp::EnvironmentDefinitionBuilder::new() } - } + fn new() -> Self; /// Register a host function in this environment definition. /// @@ -143,32 +128,28 @@ impl EnvironmentDefinitionBuilder { /// can import function passed here with any signature it wants. It can even import /// the same function (i.e. with same `module` and `field`) several times. It's up to /// the user code to check or constrain the types of signatures. - pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) + fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) where N1: Into>, - N2: Into>, - { - self.inner.add_host_func(module, field, f); - } + N2: Into>; /// Register a memory in this environment definition. - pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) + fn add_memory(&mut self, module: N1, field: N2, mem: Memory) where N1: Into>, - N2: Into>, - { - self.inner.add_memory(module, field, mem.inner); - } + N2: Into>; } /// Sandboxed instance of a wasm module. /// /// This instance can be used for invoking exported functions. -pub struct Instance { - inner: imp::Instance, -} +pub trait SandboxInstance: Sized { + /// The memory type used for this sandbox. + type Memory: SandboxMemory; + + /// The environment builder used to construct this sandbox. + type EnvironmentBuilder: SandboxEnvironmentBuilder; -impl Instance { /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. It will /// run the `start` function (if it is present in the module) with the given `state`. 
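// A minimal usage sketch of the trait-based API defined in this file (the
// remaining `SandboxInstance` methods continue just below). The wasm blob,
// the `env.no_op` import and the exported `call` entry point are
// placeholders; trait and method names follow the definitions above:

use sp_sandbox::{
	default_executor::{EnvironmentDefinitionBuilder, Instance, Memory},
	HostError, ReturnValue, SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory, Value,
};

fn run_sandboxed(code: &[u8]) -> Result<ReturnValue, sp_sandbox::Error> {
	fn no_op(_state: &mut (), _args: &[Value]) -> Result<ReturnValue, HostError> {
		Ok(ReturnValue::Unit)
	}

	let mut state = ();
	// Wire up the imports the guest module expects: one host function and a memory.
	let mut env = EnvironmentDefinitionBuilder::new();
	env.add_host_func("env", "no_op", no_op);
	env.add_memory("env", "memory", Memory::new(1, Some(16))?);

	// Instantiation runs the module's `start` section, if any; `invoke` then
	// calls the named export with the shared mutable `state`.
	let mut instance = Instance::new(code, &env, &mut state)?;
	instance.invoke("call", &[], &mut state)
}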
/// @@ -177,13 +158,11 @@ impl Instance { /// will be returned. /// /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new( + fn new( code: &[u8], - env_def_builder: &EnvironmentDefinitionBuilder, - state: &mut T, - ) -> Result, Error> { - Ok(Instance { inner: imp::Instance::new(code, &env_def_builder.inner, state)? }) - } + env_def_builder: &Self::EnvironmentBuilder, + state: &mut State, + ) -> Result; /// Invoke an exported function with the given name. /// @@ -196,19 +175,15 @@ impl Instance { /// - If types of the arguments passed to the function doesn't match function signature then /// trap occurs (as if the exported function was called via call_indirect), /// - Trap occurred at the execution time. - pub fn invoke( + fn invoke( &mut self, name: &str, args: &[Value], - state: &mut T, - ) -> Result { - self.inner.invoke(name, args, state) - } + state: &mut State, + ) -> Result; /// Get the value from a global with the given `name`. /// /// Returns `Some(_)` if the global could be found. - pub fn get_global_val(&self, name: &str) -> Option { - self.inner.get_global_val(name) - } + fn get_global_val(&self, name: &str) -> Option; } From c6a9f648cfb432193c5df09e89ac06176c318971 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 8 Nov 2021 22:03:37 +0100 Subject: [PATCH 063/162] Offchain-worker: Accessing time is not HTTP (#10220) Accessing time related functionality is clearly not related to HTTP. So, this pr introduces a `TIME` capability. --- primitives/core/src/offchain/mod.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index dfe23c1ff8f18..8058d4d05d6ad 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -262,21 +262,23 @@ bitflags::bitflags! { /// Execution context extra capabilities. pub struct Capabilities: u32 { /// Access to transaction pool. - const TRANSACTION_POOL = 0b0000_0001; + const TRANSACTION_POOL = 0b0000_0000_0001; /// External http calls. - const HTTP = 0b0000_0010; + const HTTP = 0b0000_0000_0010; /// Keystore access. - const KEYSTORE = 0b0000_0100; + const KEYSTORE = 0b0000_0000_0100; /// Randomness source. - const RANDOMNESS = 0b0000_1000; + const RANDOMNESS = 0b0000_0000_1000; /// Access to opaque network state. - const NETWORK_STATE = 0b0001_0000; + const NETWORK_STATE = 0b0000_0001_0000; /// Access to offchain worker DB (read only). - const OFFCHAIN_DB_READ = 0b0010_0000; + const OFFCHAIN_DB_READ = 0b0000_0010_0000; /// Access to offchain worker DB (writes). 
- const OFFCHAIN_DB_WRITE = 0b0100_0000; + const OFFCHAIN_DB_WRITE = 0b0000_0100_0000; /// Manage the authorized nodes - const NODE_AUTHORIZATION = 0b1000_0000; + const NODE_AUTHORIZATION = 0b0000_1000_0000; + /// Access time related functionality + const TIME = 0b0001_0000_0000; } } @@ -541,12 +543,12 @@ impl Externalities for LimitedExternalities { } fn timestamp(&mut self) -> Timestamp { - self.check(Capabilities::HTTP, "timestamp"); + self.check(Capabilities::TIME, "timestamp"); self.externalities.timestamp() } fn sleep_until(&mut self, deadline: Timestamp) { - self.check(Capabilities::HTTP, "sleep_until"); + self.check(Capabilities::TIME, "sleep_until"); self.externalities.sleep_until(deadline) } From 2a601506cd4c53c3c526c61745c8887116e491be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 8 Nov 2021 22:05:39 +0100 Subject: [PATCH 064/162] Fix polkadot companion label CI check (#10198) * Fix polkadot companion label CI check * Update .github/workflows/polkadot-companion-labels.yml --- .github/workflows/polkadot-companion-labels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/polkadot-companion-labels.yml b/.github/workflows/polkadot-companion-labels.yml index 3c3987b5f4d56..0a5af09358524 100644 --- a/.github/workflows/polkadot-companion-labels.yml +++ b/.github/workflows/polkadot-companion-labels.yml @@ -14,7 +14,7 @@ jobs: with: authToken: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - contexts: 'continuous-integration/gitlab-check-polkadot-companion-build' + contexts: 'continuous-integration/gitlab-check-dependent-polkadot' timeout: 1800 notPresentTimeout: 3600 # It can take quite a while before the job starts on Gitlab when the CI queue is large failureStates: failure From 60b51e3f0365799e5ae0d4f8a89c7b0126a5f905 Mon Sep 17 00:00:00 2001 From: David Date: Mon, 8 Nov 2021 22:06:53 +0100 Subject: [PATCH 065/162] Remove the RPC client example (#10217) * Remove the RPC client example Might be slightly contentious but I think we want to migrate this example to `subxt` and redirect the community to use that for things like this. 
* Remove rpc-client example code from workspace --- Cargo.lock | 11 ------ Cargo.toml | 1 - bin/node/rpc-client/Cargo.toml | 20 ----------- bin/node/rpc-client/src/main.rs | 63 --------------------------------- 4 files changed, 95 deletions(-) delete mode 100644 bin/node/rpc-client/Cargo.toml delete mode 100644 bin/node/rpc-client/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 4b6cc99426b1c..ae8216ae3c722 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4657,17 +4657,6 @@ dependencies = [ "substrate-frame-rpc-system", ] -[[package]] -name = "node-rpc-client" -version = "2.0.0" -dependencies = [ - "futures 0.3.16", - "jsonrpc-core-client", - "node-primitives", - "sc-rpc", - "sp-tracing", -] - [[package]] name = "node-runtime" version = "3.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 32d10ca8978dd..07053a0ef3162 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,6 @@ members = [ "bin/node/executor", "bin/node/primitives", "bin/node/rpc", - "bin/node/rpc-client", "bin/node/runtime", "bin/node/testing", "bin/utils/chain-spec-builder", diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml deleted file mode 100644 index 5c822ef3ad31a..0000000000000 --- a/bin/node/rpc-client/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "node-rpc-client" -version = "2.0.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -futures = "0.3.16" -jsonrpc-core-client = { version = "18.0.0", default-features = false, features = [ - "http", -] } -node-primitives = { version = "2.0.0", path = "../primitives" } -sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } -sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs deleted file mode 100644 index 6d0b88799f54c..0000000000000 --- a/bin/node/rpc-client/src/main.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![warn(missing_docs)] - -//! Example substrate RPC client code. -//! -//! This module shows how you can write a Rust RPC client that connects to a running -//! substrate node and use statically typed RPC wrappers. - -use futures::{Future, TryFutureExt}; -use jsonrpc_core_client::{transports::http, RpcError}; -use node_primitives::Hash; -use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorClient}; - -fn main() -> Result<(), RpcError> { - sp_tracing::try_init_simple(); - - futures::executor::block_on(async { - let uri = "http://localhost:9933"; - - http::connect(uri) - .and_then(|client: AuthorClient| remove_all_extrinsics(client)) - .await - }) -} - -/// Remove all pending extrinsics from the node. 
-/// -/// The example code takes `AuthorClient` and first: -/// 1. Calls the `pending_extrinsics` method to get all extrinsics in the pool. -/// 2. Then calls `remove_extrinsic` passing the obtained raw extrinsics. -/// -/// As the result of running the code the entire content of the transaction pool is going -/// to be removed and the extrinsics are going to be temporarily banned. -fn remove_all_extrinsics( - client: AuthorClient, -) -> impl Future> { - client - .pending_extrinsics() - .and_then(move |pending| { - client.remove_extrinsic( - pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect(), - ) - }) - .map_ok(|removed| { - println!("Removed extrinsics: {:?}", removed); - }) -} From fae2ac6a92656a0006da3aca4bf19e4a0545946f Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Tue, 9 Nov 2021 19:59:55 +0800 Subject: [PATCH 066/162] frame_support::pallet_prelude: Add scale_info::TypeInfo (#10221) Signed-off-by: koushiro --- frame/assets/src/lib.rs | 1 - frame/assets/src/types.rs | 8 ++++---- frame/gilt/src/lib.rs | 1 - frame/support/src/lib.rs | 1 + 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index dfc81eadeb530..b89d411e41db8 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -170,7 +170,6 @@ pub mod pallet { use super::*; use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; - use scale_info::TypeInfo; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index bc2edce848a64..879e9d5cdcb96 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -18,10 +18,10 @@ //! Various basic types for use in the assets pallet. 
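// With `TypeInfo` re-exported from `frame_support::pallet_prelude` (see the
// `frame/support/src/lib.rs` hunk below), pallet code can derive it without
// importing `scale_info` itself. A hypothetical sketch -- the struct is
// illustrative and not part of this patch:

use frame_support::pallet_prelude::*;

#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug, TypeInfo)]
pub struct Payment<AccountId, Balance> {
	pub payer: AccountId,
	pub amount: Balance,
}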
use super::*; -use frame_support::pallet_prelude::*; -use scale_info::TypeInfo; - -use frame_support::traits::{fungible, tokens::BalanceConversion}; +use frame_support::{ + pallet_prelude::*, + traits::{fungible, tokens::BalanceConversion}, +}; use sp_runtime::{traits::Convert, FixedPointNumber, FixedPointOperand, FixedU128}; pub(super) type DepositBalanceOf = diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 393b3acb41a36..1594601b457cb 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -82,7 +82,6 @@ pub mod pallet { traits::{Currency, OnUnbalanced, ReservableCurrency}, }; use frame_system::pallet_prelude::*; - use scale_info::TypeInfo; use sp_arithmetic::{PerThing, Perquintill}; use sp_runtime::traits::{Saturating, Zero}; use sp_std::prelude::*; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 6e60988a4ca23..af9192f6ea836 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1330,6 +1330,7 @@ pub mod pallet_prelude { PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; pub use codec::{Decode, Encode, MaxEncodedLen}; + pub use scale_info::TypeInfo; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, transaction_validity::{ From 5cb8a6d1e0b5a542667ce9220ec6c7cace76aeb8 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 9 Nov 2021 21:08:02 +0900 Subject: [PATCH 067/162] Bump `wasmtime` to 0.31.0 (#10149) * Bump `wasmtime` to 0.31.0 * Bump `itoa` to 0.4.8 * sc-executor-wasmtime: fix `SandboxContext::invoke` which I've broke * sc-executor-wasmtime: cargo fmt --- Cargo.lock | 209 ++++++++++++++++++--------- client/executor/wasmtime/Cargo.toml | 2 +- client/executor/wasmtime/src/host.rs | 19 +-- 3 files changed, 145 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae8216ae3c722..8170b096de9dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -848,9 +848,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.67" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" dependencies = [ "jobserver", ] @@ -1068,11 +1068,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15013642ddda44eebcf61365b2052a23fd8b7314f90ba44aa059ec02643c5139" +checksum = "cc0cb7df82c8cf8f2e6a8dd394a0932a71369c160cc9b027dca414fced242513" dependencies = [ - "cranelift-entity 0.77.0", + "cranelift-entity 0.78.0", ] [[package]] @@ -1088,7 +1088,7 @@ dependencies = [ "cranelift-entity 0.68.0", "gimli 0.22.0", "log 0.4.14", - "regalloc", + "regalloc 0.0.31", "smallvec 1.7.0", "target-lexicon 0.11.2", "thiserror", @@ -1096,17 +1096,17 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298f2a7ed5fdcb062d8e78b7496b0f4b95265d20245f2d0ca88f846dd192a3a3" +checksum = "fe4463c15fa42eee909e61e5eac4866b7c6d22d0d8c621e57a0c5380753bfa8c" dependencies = [ - "cranelift-bforest 0.77.0", - "cranelift-codegen-meta 0.77.0", - "cranelift-codegen-shared 0.77.0", - "cranelift-entity 0.77.0", + "cranelift-bforest 0.78.0", + "cranelift-codegen-meta 0.78.0", + "cranelift-codegen-shared 0.78.0", + "cranelift-entity 0.78.0", "gimli 0.25.0", "log 0.4.14", - 
"regalloc", + "regalloc 0.0.32", "smallvec 1.7.0", "target-lexicon 0.12.0", ] @@ -1123,12 +1123,12 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cf504261ac62dfaf4ffb3f41d88fd885e81aba947c1241275043885bc5f0bac" +checksum = "793f6a94a053a55404ea16e1700202a88101672b8cd6b4df63e13cde950852bf" dependencies = [ - "cranelift-codegen-shared 0.77.0", - "cranelift-entity 0.77.0", + "cranelift-codegen-shared 0.78.0", + "cranelift-entity 0.78.0", ] [[package]] @@ -1139,9 +1139,9 @@ checksum = "6759012d6d19c4caec95793f052613e9d4113e925e7f14154defbac0f1d4c938" [[package]] name = "cranelift-codegen-shared" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd2a72db4301dbe7e5a4499035eedc1e82720009fb60603e20504d8691fa9cd" +checksum = "44aa1846df275bce5eb30379d65964c7afc63c05a117076e62a119c25fe174be" [[package]] name = "cranelift-entity" @@ -1154,9 +1154,9 @@ dependencies = [ [[package]] name = "cranelift-entity" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48868faa07cacf948dc4a1773648813c0e453ff9467e800ff10f6a78c021b546" +checksum = "a3a45d8d6318bf8fc518154d9298eab2a8154ec068a8885ff113f6db8d69bb3a" dependencies = [ "serde", ] @@ -1175,11 +1175,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "351c9d13b4ecd1a536215ec2fd1c3ee9ee8bc31af172abf1e45ed0adb7a931df" +checksum = "e07339bd461766deb7605169de039e01954768ff730fa1254e149001884a8525" dependencies = [ - "cranelift-codegen 0.77.0", + "cranelift-codegen 0.78.0", "log 0.4.14", "smallvec 1.7.0", "target-lexicon 0.12.0", @@ -1187,28 +1187,28 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6df8b556663d7611b137b24db7f6c8d9a8a27d7f29c7ea7835795152c94c1b75" +checksum = "03e2fca76ff57e0532936a71e3fc267eae6a19a86656716479c66e7f912e3d7b" dependencies = [ - "cranelift-codegen 0.77.0", + "cranelift-codegen 0.78.0", "libc", "target-lexicon 0.12.0", ] [[package]] name = "cranelift-wasm" -version = "0.77.0" +version = "0.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a69816d90db694fa79aa39b89dda7208a4ac74b6f2b8f3c4da26ee1c8bdfc5e" +checksum = "1f46fec547a1f8a32c54ea61c28be4f4ad234ad95342b718a9a9adcaadb0c778" dependencies = [ - "cranelift-codegen 0.77.0", - "cranelift-entity 0.77.0", - "cranelift-frontend 0.77.0", + "cranelift-codegen 0.78.0", + "cranelift-entity 0.78.0", + "cranelift-frontend 0.78.0", "itertools", "log 0.4.14", "smallvec 1.7.0", - "wasmparser 0.80.2", + "wasmparser 0.81.0", "wasmtime-types", ] @@ -2928,6 +2928,16 @@ dependencies = [ "futures-timer 2.0.2", ] +[[package]] +name = "io-lifetimes" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47f5ce4afb9bf504b9f496a3307676bc232122f91a93c4da6d540aa99a0a0e0b" +dependencies = [ + "rustc_version 0.4.0", + "winapi 0.3.9", +] + [[package]] name = "iovec" version = "0.1.4" @@ -2972,9 +2982,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "jobserver" @@ -3935,6 +3945,12 @@ dependencies = [ "statrs", ] +[[package]] +name = "linux-raw-sys" +version = "0.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "687387ff42ec7ea4f2149035a5675fedb675d26f98db90a1846ac63d3addb5f5" + [[package]] name = "lite-json" version = "0.1.3" @@ -4090,9 +4106,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memmap" @@ -4984,6 +5000,15 @@ name = "object" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" +dependencies = [ + "memchr", +] + +[[package]] +name = "object" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "crc32fast", "indexmap", @@ -7281,6 +7306,17 @@ dependencies = [ "smallvec 1.7.0", ] +[[package]] +name = "regalloc" +version = "0.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6304468554ed921da3d32c355ea107b8d13d7b8996c3adfb7aab48d3bc321f4" +dependencies = [ + "log 0.4.14", + "rustc-hash", + "smallvec 1.7.0", +] + [[package]] name = "regex" version = "1.5.4" @@ -7399,6 +7435,23 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "rsix" +version = "0.23.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f64c5788d5aab8b75441499d99576a24eb09f76fb267b36fec7e3d970c66431" +dependencies = [ + "bitflags", + "cc", + "errno", + "io-lifetimes", + "itoa", + "libc", + "linux-raw-sys", + "once_cell", + "rustc_version 0.4.0", +] + [[package]] name = "rustc-demangle" version = "0.1.18" @@ -7435,6 +7488,15 @@ dependencies = [ "semver 0.11.0", ] +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.4", +] + [[package]] name = "rustls" version = "0.19.1" @@ -8816,6 +8878,12 @@ dependencies = [ "serde", ] +[[package]] +name = "semver" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" + [[package]] name = "semver-parser" version = "0.7.0" @@ -11518,15 +11586,15 @@ checksum = "87cc2fe6350834b4e528ba0901e7aa405d78b89dc1fa3145359eb4de0e323fcf" [[package]] name = "wasmparser" -version = "0.80.2" +version = "0.81.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449167e2832691a1bff24cde28d2804e90e09586a448c8e76984792c44334a6b" +checksum = "98930446519f63d00a836efdc22f67766ceae8dbcc1571379f2bcabc6b2b9abc" [[package]] name = "wasmtime" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899b1e5261e3d3420860dacfb952871ace9d7ba9f953b314f67aaf9f8e2a4d89" +checksum = "311d06b0c49346d1fbf48a17052e844036b95a7753c1afb34e8c0af3f6b5bb13" 
dependencies = [ "anyhow", "backtrace", @@ -11537,7 +11605,7 @@ dependencies = [ "lazy_static", "libc", "log 0.4.14", - "object 0.26.0", + "object 0.27.1", "paste 1.0.4", "psm", "rayon", @@ -11545,7 +11613,7 @@ dependencies = [ "rustc-demangle", "serde", "target-lexicon 0.12.0", - "wasmparser 0.80.2", + "wasmparser 0.81.0", "wasmtime-cache", "wasmtime-cranelift", "wasmtime-environ", @@ -11556,18 +11624,17 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2493b81d7a9935f7af15e06beec806f256bc974a90a843685f3d61f2fc97058" +checksum = "36147930a4995137dc096e5b17a573b446799be2bbaea433e821ce6a80abe2c5" dependencies = [ "anyhow", "base64 0.13.0", "bincode", "directories-next", - "errno", "file-per-thread-logger", - "libc", "log 0.4.14", + "rsix", "serde", "sha2 0.9.8", "toml", @@ -11577,66 +11644,67 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99706bacdf5143f7f967d417f0437cce83a724cf4518cb1a3ff40e519d793021" +checksum = "ab3083a47e1ede38aac06a1d9831640d673f9aeda0b82a64e4ce002f3432e2e7" dependencies = [ "anyhow", - "cranelift-codegen 0.77.0", - "cranelift-entity 0.77.0", - "cranelift-frontend 0.77.0", + "cranelift-codegen 0.78.0", + "cranelift-entity 0.78.0", + "cranelift-frontend 0.78.0", "cranelift-native", "cranelift-wasm", "gimli 0.25.0", + "log 0.4.14", "more-asserts", - "object 0.26.0", + "object 0.27.1", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.80.2", + "wasmparser 0.81.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac42cb562a2f98163857605f02581d719a410c5abe93606128c59a10e84de85b" +checksum = "1c2d194b655321053bc4111a1aa4ead552655c8a17d17264bc97766e70073510" dependencies = [ "anyhow", "cfg-if 1.0.0", - "cranelift-entity 0.77.0", + "cranelift-entity 0.78.0", "gimli 0.25.0", "indexmap", "log 0.4.14", "more-asserts", - "object 0.26.0", + "object 0.27.1", "serde", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.80.2", + "wasmparser 0.81.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f46dd757225f29a419be415ea6fb8558df9b0194f07e3a6a9c99d0e14dd534" +checksum = "864ac8dfe4ce310ac59f16fdbd560c257389cb009ee5d030ac6e30523b023d11" dependencies = [ "addr2line", "anyhow", "bincode", "cfg-if 1.0.0", "gimli 0.25.0", - "libc", "log 0.4.14", "more-asserts", - "object 0.26.0", + "object 0.27.1", "region", + "rsix", "serde", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.80.2", + "wasmparser 0.81.0", "wasmtime-environ", "wasmtime-runtime", "winapi 0.3.9", @@ -11644,9 +11712,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0122215a44923f395487048cb0a1d60b5b32c73aab15cf9364b798dbaff0996f" +checksum = "ab97da813a26b98c9abfd3b0c2d99e42f6b78b749c0646344e2e262d212d8c8b" dependencies = [ "anyhow", "backtrace", @@ -11661,6 +11729,7 @@ dependencies = [ "more-asserts", "rand 0.8.4", "region", + "rsix", "thiserror", "wasmtime-environ", "winapi 0.3.9", @@ -11668,14 +11737,14 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "0.30.0" 
+version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9b01caf8a204ef634ebac99700e77ba716d3ebbb68a1abbc2ceb6b16dbec9e4" +checksum = "ff94409cc3557bfbbcce6b14520ccd6bd3727e965c0fe68d63ef2c185bf379c6" dependencies = [ - "cranelift-entity 0.77.0", + "cranelift-entity 0.78.0", "serde", "thiserror", - "wasmparser 0.80.2", + "wasmparser 0.81.0", ] [[package]] diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 741898b8282c2..e52a53f71c06e 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -23,7 +23,7 @@ sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-in sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } -wasmtime = { version = "0.30.0", default-features = false, features = [ +wasmtime = { version = "0.31.0", default-features = false, features = [ "cache", "cranelift", "jitdump", diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 4edb9f9c423f0..fcb4c4cae3b8a 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -333,6 +333,7 @@ impl<'a, 'b, 'c, 'd> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c, 'd> state: u32, func_idx: SupervisorFuncIndex, ) -> Result { + let mut ret_vals = [Val::null()]; let result = self.dispatch_thunk.call( &mut self.host_context.caller, &[ @@ -341,26 +342,16 @@ impl<'a, 'b, 'c, 'd> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c, 'd> Val::I32(state as i32), Val::I32(usize::from(func_idx) as i32), ], + &mut ret_vals, ); match result { - Ok(ret_vals) => { - let ret_val = if ret_vals.len() != 1 { - return Err(format!( - "Supervisor function returned {} results, expected 1", - ret_vals.len() - ) - .into()) - } else { - &ret_vals[0] - }; - - if let Some(ret_val) = ret_val.i64() { + Ok(()) => + if let Some(ret_val) = ret_vals[0].i64() { Ok(ret_val) } else { return Err("Supervisor function returned unexpected result!".into()) - } - }, + }, Err(err) => Err(err.to_string().into()), } } From 7adee13ddc48a9d715eb2f9caa37d99713fa0102 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 9 Nov 2021 13:08:34 +0100 Subject: [PATCH 068/162] Bump parity-db (#10218) * Bump parity-db * Update cargo.lock --- Cargo.lock | 4 ++-- client/db/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8170b096de9dc..8985b5fb89338 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6223,9 +6223,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ccc4a8687027deb53d45c5434a1f1b330c9d1069a59cfe80a62aa9a1da25ae" +checksum = "7cb5195cb862b13055cf7f7a76c55073dc73885c2a61511e322b8c1666be7332" dependencies = [ "blake2-rfc", "crc32fast", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 19bf7ad248fe5..437431a50883f 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,7 +33,7 @@ sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } -parity-db = { version = 
"0.3.3", optional = true } +parity-db = { version = "0.3.4", optional = true } [dev-dependencies] sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } From 800fac14306a77c3dd7fa178c8d4a6a77d52d3bd Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 9 Nov 2021 22:38:31 +0900 Subject: [PATCH 069/162] Add a block production benchmark (#10104) * Add a block production benchmark * Simplify the block production benchmark * Cleanups; switch execution strategy to WASM * Switch WASM execution to `Compiled` * Reduce the setup cost of the benchmark Creating all of those extrinsics takes up *a lot* of time, up to the point where the majority of the time is actually spent *outside* of the code which we want to benchmark here. So let's only do it once. * Add a variant of the block production benchmark with proof recording --- Cargo.lock | 3 + bin/node/cli/Cargo.toml | 7 + bin/node/cli/benches/block_production.rs | 237 +++++++++++++++++++++++ 3 files changed, 247 insertions(+) create mode 100644 bin/node/cli/benches/block_production.rs diff --git a/Cargo.lock b/Cargo.lock index 8985b5fb89338..b164a74f94a55 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4528,6 +4528,7 @@ dependencies = [ "node-runtime", "pallet-balances", "pallet-im-online", + "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", "platforms", @@ -4536,6 +4537,7 @@ dependencies = [ "remote-externalities", "sc-authority-discovery", "sc-basic-authorship", + "sc-block-builder", "sc-chain-spec", "sc-cli", "sc-client-api", @@ -4562,6 +4564,7 @@ dependencies = [ "sp-api", "sp-authority-discovery", "sp-authorship", + "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-core", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 38c161a81ef06..15760c5a9abb4 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -116,7 +116,9 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } +sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } futures = "0.3.16" tempfile = "3.1.0" assert_cmd = "2.0.2" @@ -131,6 +133,7 @@ tokio = { version = "1.10", features = ["macros", "time"] } jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = ["tokio1"] } wait-timeout = "0.2" remote-externalities = { path = "../../../utils/frame/remote-externalities" } +pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } [build-dependencies] structopt = { version = "0.3.8", optional = true } @@ -166,3 +169,7 @@ try-runtime = ["node-runtime/try-runtime", "try-runtime-cli"] [[bench]] name = "transaction_pool" harness = false + +[[bench]] +name = "block_production" +harness = false diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs new file mode 100644 index 0000000000000..5a520e7b63397 --- /dev/null +++ b/bin/node/cli/benches/block_production.rs @@ -0,0 +1,237 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; + +use node_cli::service::{create_extrinsic, FullClient}; +use node_runtime::{constants::currency::*, BalancesCall}; +use sc_block_builder::{BlockBuilderProvider, BuiltBlock, RecordProof}; +use sc_client_api::execution_extensions::ExecutionStrategies; +use sc_consensus::{ + block_import::{BlockImportParams, ForkChoiceStrategy}, + BlockImport, StateAction, +}; +use sc_service::{ + config::{ + DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + PruningMode, TransactionStorageMode, WasmExecutionMethod, + }, + BasePath, Configuration, Role, +}; +use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed}; +use sp_consensus::BlockOrigin; +use sp_keyring::Sr25519Keyring; +use sp_runtime::{ + generic::BlockId, + transaction_validity::{InvalidTransaction, TransactionValidityError}, + AccountId32, MultiAddress, OpaqueExtrinsic, +}; +use tokio::runtime::Handle; + +fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { + let base_path = BasePath::new_temp_dir() + .expect("getting the base path of a temporary path doesn't fail; qed"); + let root = base_path.path().to_path_buf(); + + let network_config = NetworkConfiguration::new( + Sr25519Keyring::Alice.to_seed(), + "network/test/0.1", + Default::default(), + None, + ); + + let spec = Box::new(node_cli::chain_spec::development_config()); + + // NOTE: We enforce the use of the WASM runtime to benchmark block production using WASM. + let execution_strategy = sc_client_api::ExecutionStrategy::AlwaysWasm; + + let config = Configuration { + impl_name: "BenchmarkImpl".into(), + impl_version: "1.0".into(), + // We don't use the authority role since that would start producing blocks + // in the background which would mess with our benchmark. 
+ role: Role::Full, + tokio_handle, + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::InMemory, + keystore_remote: Default::default(), + database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, + state_cache_size: 67108864, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + chain_spec: spec, + wasm_method: WasmExecutionMethod::Compiled, + execution_strategies: ExecutionStrategies { + syncing: execution_strategy, + importing: execution_strategy, + block_construction: execution_strategy, + offchain_worker: execution_strategy, + other: execution_strategy, + }, + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + rpc_max_payload: None, + ws_max_out_buffer_capacity: None, + prometheus_config: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false }, + force_authoring: false, + disable_grandpa: false, + dev_key_seed: Some(Sr25519Keyring::Alice.to_seed()), + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(base_path), + informant_output_format: Default::default(), + wasm_runtime_overrides: None, + }; + + node_cli::service::new_full_base(config, |_, _| ()).expect("creating a full node doesn't fail") +} + +fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { + node_runtime::UncheckedExtrinsic { + signature: None, + function: node_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), + } + .into() +} + +fn import_block( + mut client: &FullClient, + built: BuiltBlock< + node_primitives::Block, + >::StateBackend, + >, +) { + let mut params = BlockImportParams::new(BlockOrigin::File, built.block.header); + params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes)); + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + futures::executor::block_on(client.import_block(params, Default::default())) + .expect("importing a block doesn't fail"); +} + +fn prepare_benchmark(client: &FullClient) -> (usize, Vec) { + const MINIMUM_PERIOD_FOR_BLOCKS: u64 = 1500; + + let mut max_transfer_count = 0; + let mut extrinsics = Vec::new(); + let mut block_builder = client.new_block(Default::default()).unwrap(); + + // Every block needs one timestamp extrinsic. + let extrinsic_set_time = extrinsic_set_time(1 + MINIMUM_PERIOD_FOR_BLOCKS); + block_builder.push(extrinsic_set_time.clone()).unwrap(); + extrinsics.push(extrinsic_set_time); + + // Creating those is surprisingly costly, so let's only do it once and later just `clone` them. + let src = Sr25519Keyring::Alice.pair(); + let dst: MultiAddress = Sr25519Keyring::Bob.to_account_id().into(); + + // Add as many tranfer extrinsics as possible into a single block. + for nonce in 0.. 
{ + let extrinsic: OpaqueExtrinsic = create_extrinsic( + client, + src.clone(), + BalancesCall::transfer { dest: dst.clone(), value: 1 * DOLLARS }, + Some(nonce), + ) + .into(); + + match block_builder.push(extrinsic.clone()) { + Ok(_) => {}, + Err(ApplyExtrinsicFailed(Validity(TransactionValidityError::Invalid( + InvalidTransaction::ExhaustsResources, + )))) => break, + Err(error) => panic!("{}", error), + } + + extrinsics.push(extrinsic); + max_transfer_count += 1; + } + + (max_transfer_count, extrinsics) +} + +fn block_production(c: &mut Criterion) { + sp_tracing::try_init_simple(); + + let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed"); + let tokio_handle = runtime.handle().clone(); + + let node = new_node(tokio_handle.clone()); + let client = &*node.client; + + // Buliding the very first block is around ~30x slower than any subsequent one, + // so let's make sure it's built and imported before we benchmark anything. + let mut block_builder = client.new_block(Default::default()).unwrap(); + block_builder.push(extrinsic_set_time(1)).unwrap(); + import_block(client, block_builder.build().unwrap()); + + let (max_transfer_count, extrinsics) = prepare_benchmark(&client); + log::info!("Maximum transfer count: {}", max_transfer_count); + + let mut group = c.benchmark_group("Block production"); + + group.sample_size(10); + group.throughput(Throughput::Elements(max_transfer_count as u64)); + + let block_id = BlockId::Hash(client.chain_info().best_hash); + + group.bench_function(format!("{} transfers (no proof)", max_transfer_count), |b| { + b.iter_batched( + || extrinsics.clone(), + |extrinsics| { + let mut block_builder = + client.new_block_at(&block_id, Default::default(), RecordProof::No).unwrap(); + for extrinsic in extrinsics { + block_builder.push(extrinsic).unwrap(); + } + block_builder.build().unwrap() + }, + BatchSize::SmallInput, + ) + }); + + group.bench_function(format!("{} transfers (with proof)", max_transfer_count), |b| { + b.iter_batched( + || extrinsics.clone(), + |extrinsics| { + let mut block_builder = + client.new_block_at(&block_id, Default::default(), RecordProof::Yes).unwrap(); + for extrinsic in extrinsics { + block_builder.push(extrinsic).unwrap(); + } + block_builder.build().unwrap() + }, + BatchSize::SmallInput, + ) + }); +} + +criterion_group!(benches, block_production); +criterion_main!(benches); From dfe12bf772bfae862884af5ed4dd0b73011f5c84 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 9 Nov 2021 21:26:39 +0000 Subject: [PATCH 070/162] new remote-ext mode: (#10192) --- Cargo.lock | 1 + utils/frame/remote-externalities/Cargo.toml | 1 + utils/frame/remote-externalities/src/lib.rs | 122 +++++++++++++++----- 3 files changed, 94 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b164a74f94a55..b72f8ece09a4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7364,6 +7364,7 @@ name = "remote-externalities" version = "0.10.0-dev" dependencies = [ "env_logger 0.9.0", + "frame-support", "jsonrpsee-proc-macros", "jsonrpsee-ws-client", "log 0.4.14", diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 3e7c229ec4d65..9c9025d934aab 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -32,6 +32,7 @@ sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } [dev-dependencies] tokio = { version = "1.10", features = ["macros", 
"rt-multi-thread"] } pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "5.0.0-dev" } +frame-support = { path = "../../../frame/support", version = "4.0.0-dev" } [features] remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 3b9e08f75da85..bf8c57ae14ee6 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -22,7 +22,6 @@ use codec::{Decode, Encode}; use jsonrpsee_ws_client::{types::v2::params::JsonRpcParams, WsClient, WsClientBuilder}; -use log::*; use sp_core::{ hashing::twox_128, hexdisplay::HexDisplay, @@ -62,10 +61,12 @@ jsonrpsee_proc_macros::rpc_client_api! { /// The execution mode. #[derive(Clone)] pub enum Mode { - /// Online. + /// Online. Potentially writes to a cache file. Online(OnlineConfig), /// Offline. Uses a state snapshot file and needs not any client config. Offline(OfflineConfig), + /// Prefer using a cache file if it exists, else use a remote server. + OfflineOrElseOnline(OfflineConfig, OnlineConfig), } impl Default for Mode { @@ -83,6 +84,12 @@ pub struct OfflineConfig { pub state_snapshot: SnapshotConfig, } +impl> From

for SnapshotConfig { + fn from(p: P) -> Self { + Self { path: p.into() } + } +} + /// Description of the transport protocol (for online execution). #[derive(Debug)] pub struct Transport { @@ -193,6 +200,7 @@ impl Builder { fn as_online(&self) -> &OnlineConfig { match &self.mode { Mode::Online(config) => &config, + Mode::OfflineOrElseOnline(_, config) => &config, _ => panic!("Unexpected mode: Online"), } } @@ -200,6 +208,7 @@ impl Builder { fn as_online_mut(&mut self) -> &mut OnlineConfig { match &mut self.mode { Mode::Online(config) => config, + Mode::OfflineOrElseOnline(_, config) => config, _ => panic!("Unexpected mode: Online"), } } @@ -212,19 +221,19 @@ impl Builder { key: StorageKey, maybe_at: Option, ) -> Result { - trace!(target: LOG_TARGET, "rpc: get_storage"); + log::trace!(target: LOG_TARGET, "rpc: get_storage"); RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at) .await .map_err(|e| { - error!("Error = {:?}", e); + log::error!(target: LOG_TARGET, "Error = {:?}", e); "rpc get_storage failed." }) } /// Get the latest finalized head. async fn rpc_get_head(&self) -> Result { - trace!(target: LOG_TARGET, "rpc: finalized_head"); + log::trace!(target: LOG_TARGET, "rpc: finalized_head"); RpcApi::::finalized_head(self.as_online().rpc_client()).await.map_err(|e| { - error!("Error = {:?}", e); + log::error!(target: LOG_TARGET, "Error = {:?}", e); "rpc finalized_head failed." }) } @@ -248,19 +257,19 @@ impl Builder { ) .await .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); + log::error!(target: LOG_TARGET, "Error = {:?}", e); "rpc get_keys failed" })?; let page_len = page.len(); all_keys.extend(page); if page_len < PAGE as usize { - debug!(target: LOG_TARGET, "last page received: {}", page_len); + log::debug!(target: LOG_TARGET, "last page received: {}", page_len); break all_keys } else { let new_last_key = all_keys.last().expect("all_keys is populated; has .last(); qed"); - debug!( + log::debug!( target: LOG_TARGET, "new total = {}, full page received: {:?}", all_keys.len(), @@ -286,7 +295,7 @@ impl Builder { use serde_json::to_value; let keys = self.get_keys_paged(prefix, at).await?; let keys_count = keys.len(); - debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); + log::debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); let mut key_values: Vec = vec![]; let client = self.as_online().rpc_client(); @@ -323,7 +332,7 @@ impl Builder { key_values.push((key.clone(), value)); if key_values.len() % (10 * BATCH_SIZE) == 0 { let ratio: f64 = key_values.len() as f64 / keys_count as f64; - debug!( + log::debug!( target: LOG_TARGET, "progress = {:.2} [{} / {}]", ratio, @@ -342,14 +351,14 @@ impl Builder { impl Builder { /// Save the given data as state snapshot. fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { - debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); + log::debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; Ok(()) } /// initialize `Self` from state snapshot. Panics if the file does not exist. 
fn load_state_snapshot(&self, path: &Path) -> Result, &'static str> { - info!(target: LOG_TARGET, "scraping key-pairs from state snapshot {:?}", path); + log::info!(target: LOG_TARGET, "scraping key-pairs from state snapshot {:?}", path); let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; Decode::decode(&mut &*bytes).map_err(|_| "decode failed") } @@ -362,14 +371,14 @@ impl Builder { .at .expect("online config must be initialized by this point; qed.") .clone(); - info!(target: LOG_TARGET, "scraping key-pairs from remote @ {:?}", at); + log::info!(target: LOG_TARGET, "scraping key-pairs from remote @ {:?}", at); let mut keys_and_values = if config.pallets.len() > 0 { let mut filtered_kv = vec![]; for f in config.pallets.iter() { let hashed_prefix = StorageKey(twox_128(f.as_bytes()).to_vec()); let module_kv = self.rpc_get_pairs_paged(hashed_prefix.clone(), at).await?; - info!( + log::info!( target: LOG_TARGET, "downloaded data for module {} (count: {} / prefix: {:?}).", f, @@ -380,12 +389,12 @@ impl Builder { } filtered_kv } else { - info!(target: LOG_TARGET, "downloading data for all pallets."); + log::info!(target: LOG_TARGET, "downloading data for all pallets."); self.rpc_get_pairs_paged(StorageKey(vec![]), at).await? }; for prefix in &self.hashed_prefixes { - info!( + log::info!( target: LOG_TARGET, "adding data for hashed prefix: {:?}", HexDisplay::from(prefix) @@ -397,7 +406,11 @@ impl Builder { for key in &self.hashed_keys { let key = StorageKey(key.to_vec()); - info!(target: LOG_TARGET, "adding data for hashed key: {:?}", HexDisplay::from(&key)); + log::info!( + target: LOG_TARGET, + "adding data for hashed key: {:?}", + HexDisplay::from(&key) + ); let value = self.rpc_get_storage(key.clone(), Some(at)).await?; keys_and_values.push((key, value)); } @@ -407,7 +420,7 @@ impl Builder { pub(crate) async fn init_remote_client(&mut self) -> Result<(), &'static str> { let mut online = self.as_online_mut(); - debug!(target: LOG_TARGET, "initializing remote client to {:?}", online.transport.uri); + log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", online.transport.uri); // First, initialize the ws client. let ws_client = WsClientBuilder::default() @@ -437,11 +450,23 @@ impl Builder { } kp }, + Mode::OfflineOrElseOnline(offline_config, online_config) => { + if let Ok(kv) = self.load_state_snapshot(&offline_config.state_snapshot.path) { + kv + } else { + self.init_remote_client().await?; + let kp = self.load_remote().await?; + if let Some(c) = online_config.state_snapshot { + self.save_state_snapshot(&kp, &c.path)?; + } + kp + } + }, }; // inject manual key values. if !self.hashed_key_values.is_empty() { - debug!( + log::debug!( target: LOG_TARGET, "extending externalities with {} manually injected key-values", self.hashed_key_values.len() @@ -451,7 +476,7 @@ impl Builder { // exclude manual key values. if !self.hashed_blacklist.is_empty() { - debug!( + log::debug!( target: LOG_TARGET, "excluding externalities from {} keys", self.hashed_blacklist.len() @@ -522,7 +547,7 @@ impl Builder { let kv = self.pre_build().await?; let mut ext = TestExternalities::new_empty(); - info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); + log::info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); for (k, v) in kv { let (k, v) = (k.0, v.0); // Insert the key,value pair into the test trie backend @@ -603,12 +628,51 @@ mod remote_tests { const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. 
Is it running?"; + #[tokio::test] + async fn offline_else_online_works() { + init_logger(); + // this shows that in the second run, we use the remote and create a cache. + Builder::::new() + .mode(Mode::OfflineOrElseOnline( + OfflineConfig { + state_snapshot: SnapshotConfig::new("test_snapshot_to_remove.bin"), + }, + OnlineConfig { + pallets: vec!["Proxy".to_owned()], + state_snapshot: Some(SnapshotConfig::new("test_snapshot_to_remove.bin")), + ..Default::default() + }, + )) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + + // this shows that in the second run, we are not using the remote + Builder::::new() + .mode(Mode::OfflineOrElseOnline( + OfflineConfig { + state_snapshot: SnapshotConfig::new("test_snapshot_to_remove.bin"), + }, + OnlineConfig { + pallets: vec!["Proxy".to_owned()], + state_snapshot: Some(SnapshotConfig::new("test_snapshot_to_remove.bin")), + transport: "ws://non-existent:666".to_owned().into(), + ..Default::default() + }, + )) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + } + #[tokio::test] async fn can_build_one_pallet() { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - pallets: vec!["System".to_owned()], + pallets: vec!["Proxy".to_owned()], ..Default::default() })) .build() @@ -622,11 +686,7 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - pallets: vec![ - "Proxy".to_owned(), - "Multisig".to_owned(), - "PhragmenElection".to_owned(), - ], + pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], ..Default::default() })) .build() @@ -639,6 +699,7 @@ mod remote_tests { async fn sanity_check_decoding() { use pallet_elections_phragmen::SeatHolder; use sp_core::crypto::Ss58Codec; + type AccountId = sp_runtime::AccountId32; type Balance = u128; frame_support::generate_storage_alias!( @@ -676,7 +737,7 @@ mod remote_tests { Builder::::new() .mode(Mode::Online(OnlineConfig { state_snapshot: Some(SnapshotConfig::new("test_snapshot_to_remove.bin")), - pallets: vec!["Balances".to_owned()], + pallets: vec!["Proxy".to_owned()], ..Default::default() })) .build() @@ -684,7 +745,7 @@ mod remote_tests { .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); - let to_delete = std::fs::read_dir(SnapshotConfig::default().path) + let to_delete = std::fs::read_dir(Path::new(".")) .unwrap() .into_iter() .map(|d| d.unwrap()) @@ -699,6 +760,7 @@ mod remote_tests { } #[tokio::test] + #[ignore = "takes too much time on average."] async fn can_fetch_all() { init_logger(); Builder::::new() From 5111c72d3d611c0a643d89a305b2c9a4fdaa9134 Mon Sep 17 00:00:00 2001 From: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Date: Tue, 9 Nov 2021 14:27:40 -0700 Subject: [PATCH 071/162] move wiki -> docs (#10225) Co-authored-by: Dan Shields --- bin/utils/subkey/README.md | 2 +- client/cli/src/commands/run_cmd.rs | 4 ++-- primitives/core/src/crypto.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md index 2310c59f4a283..e762a42c3e79e 100644 --- a/bin/utils/subkey/README.md +++ b/bin/utils/subkey/README.md @@ -69,7 +69,7 @@ The output above also show the **public key** and the **Account ID**. Those are The **SS58 address** (or **Public Address**) of a new account is a reprensentation of the public keys of an account for a given network (for instance Kusama or Polkadot). 
-You can read more about the SS58 format in the [substrate wiki](https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)) and see the list of reserved prefixes in the [Polkadot wiki](https://wiki.polkadot.network/docs/build-ss58-registry). +You can read more about the SS58 format in the [Substrate Docs](https://docs.substrate.io/v3/advanced/ss58/) and see the list of reserved prefixes in the [SS58 Registry](https://github.com/paritytech/ss58-registry). For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index d6c0133a7c145..ad2f04583c10b 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -58,7 +58,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] pub rpc_external: bool, @@ -89,7 +89,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] pub ws_external: bool, diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 21b8520c7780f..4f21d62f5850d 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -219,7 +219,7 @@ pub enum PublicError { /// Key that can be encoded to/from SS58. /// -/// See +/// See /// for information on the codec. #[cfg(feature = "full_crypto")] pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { From 5fe5974173b3f4edb83cb4f58c4af2a02685a364 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Tue, 9 Nov 2021 22:33:21 +0000 Subject: [PATCH 072/162] upgrade ss58-registry with additional networks. (#10224) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * upgrade ss58-registry with additional networks. 
* adding cargo lock * Update primitives/core/Cargo.toml Co-authored-by: Bastian Köcher * turn on std Co-authored-by: Bastian Köcher --- Cargo.lock | 4 ++-- primitives/core/Cargo.toml | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b72f8ece09a4f..5e940b93f65f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9995,9 +9995,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "ss58-registry" -version = "1.0.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2413ecc7946ca99368862851dc1359f1477bc654ecfb135cf3efcb85ceca5f" +checksum = "c66cd4c4bb7ee41dc5b0c13d600574ae825d3a02e8f31326b17ac71558f2c836" dependencies = [ "Inflector", "proc-macro2", diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 8262393e653fe..9948b2e968c26 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -68,7 +68,7 @@ twox-hash = { version = "1.6.1", default-features = false, optional = true } libsecp256k1 = { version = "0.6", default-features = false, features = ["hmac", "static-context"], optional = true } sp-core-hashing = { version = "4.0.0-dev", path = "./hashing", default-features = false, optional = true } merlin = { version = "2.0", default-features = false, optional = true } -ss58-registry = "1.0.0" +ss58-registry = { version = "1.5.0", default-features = false } sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } [dev-dependencies] @@ -125,6 +125,7 @@ std = [ "sp-externalities", "sp-storage/std", "sp-runtime-interface/std", + "ss58-registry/std", "zeroize/alloc", "secrecy/alloc", "futures", From 1a6631117fdc66a65f3d484d5c8c015c4207f7c7 Mon Sep 17 00:00:00 2001 From: zjb0807 Date: Wed, 10 Nov 2021 16:03:52 +0800 Subject: [PATCH 073/162] Upgrade wasm builder (#10226) * add TypeInfo for DispatchTime * upgrade wasm-builder to Rust 2021 * remove resolver * revert resolver in virtual workspace --- utils/wasm-builder/src/prerequisites.rs | 2 +- utils/wasm-builder/src/wasm_project.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index c45f7933a1de3..7236b8169bcb5 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -58,7 +58,7 @@ fn create_check_toolchain_project(project_dir: &Path) { [package] name = "wasm-test" version = "1.0.0" - edition = "2018" + edition = "2021" build = "build.rs" [lib] diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 3806a890a1064..59214ab483c62 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -266,8 +266,7 @@ fn create_project_cargo_toml( let mut package = Table::new(); package.insert("name".into(), format!("{}-wasm", crate_name).into()); package.insert("version".into(), "1.0.0".into()); - package.insert("edition".into(), "2018".into()); - package.insert("resolver".into(), "2".into()); + package.insert("edition".into(), "2021".into()); wasm_workspace_toml.insert("package".into(), package.into()); From 7b24a94cb065ca1018019c825e6dfc21327b47c3 Mon Sep 17 00:00:00 2001 From: Jimmy Chu Date: Wed, 10 Nov 2021 16:11:28 +0800 Subject: [PATCH 074/162] Move all example pallets under `examples` folder. 
(#10215) * Put all examples under one folder Signed-off-by: Jimmy Chu * Updated Cargo.toml Signed-off-by: Jimmy Chu * updated for ci script Signed-off-by: Jimmy Chu * update Signed-off-by: Jimmy Chu * Added notes that example pallets are not meant to be used in production. Signed-off-by: Jimmy Chu * updated Signed-off-by: Jimmy Chu --- .gitlab-ci.yml | 6 +++--- Cargo.lock | 2 +- Cargo.toml | 6 +++--- frame/benchmarking/src/lib.rs | 4 ++-- frame/{example => examples/basic}/Cargo.toml | 21 ++++++++++--------- frame/{example => examples/basic}/README.md | 14 +++++++------ .../basic}/src/benchmarking.rs | 6 +++--- frame/{example => examples/basic}/src/lib.rs | 8 ++++--- .../{example => examples/basic}/src/tests.rs | 14 ++++++------- .../basic}/src/weights.rs | 8 +++---- .../offchain-worker}/Cargo.toml | 17 ++++++++------- .../offchain-worker}/README.md | 4 +++- .../offchain-worker}/src/lib.rs | 3 +++ .../offchain-worker}/src/tests.rs | 0 .../parallel}/Cargo.toml | 15 ++++++------- frame/examples/parallel/README.md | 7 +++++++ .../parallel}/src/lib.rs | 6 ++++-- .../parallel}/src/tests.rs | 0 18 files changed, 81 insertions(+), 60 deletions(-) rename frame/{example => examples/basic}/Cargo.toml (78%) rename frame/{example => examples/basic}/README.md (94%) rename frame/{example => examples/basic}/src/benchmarking.rs (93%) rename frame/{example => examples/basic}/src/lib.rs (99%) rename frame/{example => examples/basic}/src/tests.rs (92%) rename frame/{example => examples/basic}/src/weights.rs (93%) rename frame/{example-offchain-worker => examples/offchain-worker}/Cargo.toml (82%) rename frame/{example-offchain-worker => examples/offchain-worker}/README.md (87%) rename frame/{example-offchain-worker => examples/offchain-worker}/src/lib.rs (99%) rename frame/{example-offchain-worker => examples/offchain-worker}/src/tests.rs (100%) rename frame/{example-parallel => examples/parallel}/Cargo.toml (84%) create mode 100644 frame/examples/parallel/README.md rename frame/{example-parallel => examples/parallel}/src/lib.rs (95%) rename frame/{example-parallel => examples/parallel}/src/tests.rs (100%) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5e406b36c5c5b..210bb447e4c7b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -41,7 +41,7 @@ variables: &default-vars CI_IMAGE: "paritytech/ci-linux:production" # FIXME set to release CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.12" - CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder" + CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example-* subkey chain-spec-builder" VAULT_SERVER_URL: "https://vault.parity-mgmt-vault.parity.io" VAULT_AUTH_PATH: "gitlab-parity-io-jwt" VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" @@ -482,9 +482,9 @@ test-frame-examples-compile-to-wasm: RUSTFLAGS: "-Cdebug-assertions=y" RUST_BACKTRACE: 1 script: - - cd frame/example-offchain-worker/ + - cd frame/examples/offchain-worker/ - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - - cd ../example + - cd ../basic - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - sccache -s diff --git a/Cargo.lock b/Cargo.lock index 5e940b93f65f2..a7bb471f90d8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5515,7 +5515,7 @@ dependencies = [ ] [[package]] -name = "pallet-example" +name = "pallet-example-basic" version = "4.0.0-dev" dependencies = [ "frame-benchmarking", diff --git a/Cargo.toml b/Cargo.toml index 07053a0ef3162..ca60af692497d 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,9 +84,9 @@ members = [ "frame/elections", "frame/election-provider-multi-phase", "frame/election-provider-support", - "frame/example", - "frame/example-offchain-worker", - "frame/example-parallel", + "frame/examples/basic", + "frame/examples/offchain-worker", + "frame/examples/parallel", "frame/executive", "frame/gilt", "frame/grandpa", diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 258b40cbe6f0b..088dbeb0bb78d 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1217,7 +1217,7 @@ macro_rules! impl_benchmark_test { /// This creates a test suite which runs the module's benchmarks. /// -/// When called in `pallet_example` as +/// When called in `pallet_example_basic` as /// /// ```rust,ignore /// impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); @@ -1243,7 +1243,7 @@ macro_rules! impl_benchmark_test { /// } /// ``` /// -/// When called inside the `benchmarks` macro of the `pallet_example` as +/// When called inside the `benchmarks` macro of the `pallet_example_basic` as /// /// ```rust,ignore /// benchmarks! { diff --git a/frame/example/Cargo.toml b/frame/examples/basic/Cargo.toml similarity index 78% rename from frame/example/Cargo.toml rename to frame/examples/basic/Cargo.toml index 0b3a742de0dd7..a4e8ffe3261cd 100644 --- a/frame/example/Cargo.toml +++ b/frame/examples/basic/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "pallet-example" +name = "pallet-example-basic" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" @@ -14,18 +14,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../balances" } -sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } -sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } log = { version = "0.4.14", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking", optional = true } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } [dev-dependencies] -sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core", 
default-features = false } [features] default = ["std"] diff --git a/frame/example/README.md b/frame/examples/basic/README.md similarity index 94% rename from frame/example/README.md rename to frame/examples/basic/README.md index e06dee78c3f81..358829192f11d 100644 --- a/frame/example/README.md +++ b/frame/examples/basic/README.md @@ -1,11 +1,13 @@ -# Example Pallet +# Basic Example Pallet The Example: A simple example of a FRAME pallet demonstrating concepts, APIs and structures common to most FRAME runtimes. -Run `cargo doc --package pallet-example --open` to view this pallet's documentation. +Run `cargo doc --package pallet-example-basic --open` to view this pallet's documentation. + +**This pallet serves as an example and is not meant to be used in production.** ### Documentation Guidelines: @@ -34,7 +36,7 @@ Run `cargo doc --package pallet-example --open` to view this pallet's documentat ### Documentation Template:
-Copy and paste this template from frame/example/src/lib.rs into file +Copy and paste this template from frame/examples/basic/src/lib.rs into file `frame//src/lib.rs` of your own custom pallet and complete it.

 // Add heading with custom pallet name
@@ -46,9 +48,9 @@ Copy and paste this template from frame/example/src/lib.rs into file
 // Include the following links that shows what trait needs to be implemented to use the pallet
 // and the supported dispatchables that are documented in the Call enum.
 
-- \[`::Config`](https://docs.rs/pallet-example/latest/pallet_example/trait.Config.html)
-- \[`Call`](https://docs.rs/pallet-example/latest/pallet_example/enum.Call.html)
-- \[`Module`](https://docs.rs/pallet-example/latest/pallet_example/struct.Module.html)
+- \[`::Config`](https://docs.rs/pallet-example-basic/latest/pallet_example_basic/trait.Config.html)
+- \[`Call`](https://docs.rs/pallet-example-basic/latest/pallet_example_basic/enum.Call.html)
+- \[`Module`](https://docs.rs/pallet-example-basic/latest/pallet_example_basic/struct.Module.html)
 
 \## Overview
 
diff --git a/frame/example/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs
similarity index 93%
rename from frame/example/src/benchmarking.rs
rename to frame/examples/basic/src/benchmarking.rs
index e89c646e03f1a..a031b15834d63 100644
--- a/frame/example/src/benchmarking.rs
+++ b/frame/examples/basic/src/benchmarking.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Benchmarking for pallet-example.
+//! Benchmarking for pallet-example-basic.
 
 #![cfg(feature = "runtime-benchmarks")]
 
@@ -23,7 +23,7 @@ use crate::*;
 use frame_benchmarking::{benchmarks, whitelisted_caller};
 use frame_system::RawOrigin;
 
-// To actually run this benchmark on pallet-example, we need to put this pallet into the
+// To actually run this benchmark on pallet-example-basic, we need to put this pallet into the
 //   runtime and compile it with `runtime-benchmarks` feature. The detail procedures are
 //   documented at:
 //   https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark
@@ -67,7 +67,7 @@ benchmarks! {
 	}
 
 	// This line generates test cases for benchmarking, and could be run by:
-	//   `cargo test -p pallet-example --all-features`, you will see one line per case:
+	//   `cargo test -p pallet-example-basic --all-features`, you will see one line per case:
 	//   `test benchmarking::bench_sort_vector ... ok`
 	//   `test benchmarking::bench_accumulate_dummy ... ok`
 	//   `test benchmarking::bench_set_dummy_benchmark ... ok` in the result.
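For orientation, the generated test cases referred to above come from `impl_benchmark_test_suite!` inside this file's `benchmarks!` block, as documented in `frame/benchmarking`. A minimal sketch of that pattern, assuming the mock runtime `Test` and `new_test_ext` from this crate's tests module:

```rust
use frame_benchmarking::{benchmarks, whitelisted_caller};
use frame_system::RawOrigin;

benchmarks! {
	// One benchmark per dispatchable; `b` is the variable component.
	accumulate_dummy {
		let b in 1 .. 1000;
		let caller = whitelisted_caller();
	}: _(RawOrigin::Signed(caller), b.into())

	// Expands into the `bench_*` test cases listed above, runnable via
	// `cargo test -p pallet-example-basic --all-features`.
	impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test);
}
```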
diff --git a/frame/example/src/lib.rs b/frame/examples/basic/src/lib.rs
similarity index 99%
rename from frame/example/src/lib.rs
rename to frame/examples/basic/src/lib.rs
index 981274b1ba739..b172acb66d324 100644
--- a/frame/example/src/lib.rs
+++ b/frame/examples/basic/src/lib.rs
@@ -16,13 +16,15 @@
 // limitations under the License.
 
 //! 
-//! # Example Pallet
+//! # Basic Example Pallet
 //!
 //! 
 //! The Example: A simple example of a FRAME pallet demonstrating
 //! concepts, APIs and structures common to most FRAME runtimes.
 //!
-//! Run `cargo doc --package pallet-example --open` to view this pallet's documentation.
+//! Run `cargo doc --package pallet-example-basic --open` to view this pallet's documentation.
+//!
+//! **This pallet serves as an example and is not meant to be used in production.**
 //!
 //! ### Documentation Guidelines:
 //!
@@ -59,7 +61,7 @@
 //!
 //! ### Documentation Template:
 //! 
-//! Copy and paste this template from frame/example/src/lib.rs into file
+//! Copy and paste this template from frame/examples/basic/src/lib.rs into file
 //! `frame//src/lib.rs` of your own custom pallet and complete it.
 //!

 //! // Add heading with custom pallet name
diff --git a/frame/example/src/tests.rs b/frame/examples/basic/src/tests.rs
similarity index 92%
rename from frame/example/src/tests.rs
rename to frame/examples/basic/src/tests.rs
index 4c2274572db81..e069cccf8d800 100644
--- a/frame/example/src/tests.rs
+++ b/frame/examples/basic/src/tests.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Tests for pallet-example.
+//! Tests for pallet-example-basic.
 
 use crate::*;
 use frame_support::{
@@ -32,7 +32,7 @@ use sp_runtime::{
 	BuildStorage,
 };
 // Reexport crate as its pallet name for construct_runtime.
-use crate as pallet_example;
+use crate as pallet_example_basic;
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -46,7 +46,7 @@ frame_support::construct_runtime!(
 	{
 		System: frame_system::{Pallet, Call, Config, Storage, Event},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config, Event},
-		Example: pallet_example::{Pallet, Call, Storage, Config, Event},
+		Example: pallet_example_basic::{Pallet, Call, Storage, Config, Event},
 	}
 );
 
@@ -111,7 +111,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 		// We use default for brevity, but you can configure as desired if needed.
 		system: Default::default(),
 		balances: Default::default(),
-		example: pallet_example::GenesisConfig {
+		example: pallet_example_basic::GenesisConfig {
 			dummy: 42,
 			// we configure the map with (key, value) pairs.
 			bar: vec![(1, 2), (2, 3)],
@@ -163,7 +163,7 @@ fn set_dummy_works() {
 #[test]
 fn signed_ext_watch_dummy_works() {
 	new_test_ext().execute_with(|| {
-		let call = pallet_example::Call::set_dummy { new_value: 10 }.into();
+		let call = pallet_example_basic::Call::set_dummy { new_value: 10 }.into();
 		let info = DispatchInfo::default();
 
 		assert_eq!(
@@ -192,14 +192,14 @@ fn counted_map_works() {
 #[test]
 fn weights_work() {
 	// must have a defined weight.
-	let default_call = pallet_example::Call::<Test>::accumulate_dummy { increase_by: 10 };
+	let default_call = pallet_example_basic::Call::<Test>::accumulate_dummy { increase_by: 10 };
 	let info1 = default_call.get_dispatch_info();
 	// aka. `let info = <Call<Test> as GetDispatchInfo>::get_dispatch_info(&default_call);`
 	assert!(info1.weight > 0);
 
 	// `set_dummy` is simpler than `accumulate_dummy`, and the weight
 	//   should be less.
-	let custom_call = pallet_example::Call::<Test>::set_dummy { new_value: 20 };
+	let custom_call = pallet_example_basic::Call::<Test>::set_dummy { new_value: 20 };
 	let info2 = custom_call.get_dispatch_info();
 	assert!(info1.weight > info2.weight);
 }
diff --git a/frame/example/src/weights.rs b/frame/examples/basic/src/weights.rs
similarity index 93%
rename from frame/example/src/weights.rs
rename to frame/examples/basic/src/weights.rs
index efcfdc6729b53..048f7f5a8b8e5 100644
--- a/frame/example/src/weights.rs
+++ b/frame/examples/basic/src/weights.rs
@@ -15,7 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Autogenerated weights for pallet_example
+//! Autogenerated weights for pallet_example_basic
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
 //! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]`
@@ -31,7 +31,7 @@
 // --wasm-execution
 // compiled
 // --pallet
-// pallet_example
+// pallet_example_basic
 // --extrinsic
 // *
 // --steps
@@ -52,14 +52,14 @@
 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
-/// Weight functions needed for pallet_example.
+/// Weight functions needed for pallet_example_basic.
 pub trait WeightInfo {
 	fn set_dummy_benchmark(b: u32, ) -> Weight;
 	fn accumulate_dummy(b: u32, ) -> Weight;
 	fn sort_vector(x: u32, ) -> Weight;
 }
 
-/// Weights for pallet_example using the Substrate node and recommended hardware.
+/// Weights for pallet_example_basic using the Substrate node and recommended hardware.
 pub struct SubstrateWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 	fn set_dummy_benchmark(b: u32, ) -> Weight {
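For context on how these generated functions are consumed (not part of this diff): runtimes resolve them through the `WeightInfo` trait, typically via an associated type on the pallet's `Config`. A minimal sketch, assuming the trait and struct above are in scope for some runtime `T`:

```rust
use frame_support::weights::Weight;

/// Sketch: the declared weight of `set_dummy_benchmark` for b = 10, resolved
/// through the generated node implementation.
fn declared_weight<T: frame_system::Config>() -> Weight {
	<SubstrateWeight<T> as WeightInfo>::set_dummy_benchmark(10)
}
```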
diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/examples/offchain-worker/Cargo.toml
similarity index 82%
rename from frame/example-offchain-worker/Cargo.toml
rename to frame/examples/offchain-worker/Cargo.toml
index ffcadca26e11e..cb505ad42f254 100644
--- a/frame/example-offchain-worker/Cargo.toml
+++ b/frame/examples/offchain-worker/Cargo.toml
@@ -14,16 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
-scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
-frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
-sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" }
-sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore", optional = true }
-sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
 lite-json = { version = "0.1", default-features = false }
 log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" }
+sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore", optional = true }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
 
 [features]
 default = ["std"]
diff --git a/frame/example-offchain-worker/README.md b/frame/examples/offchain-worker/README.md
similarity index 87%
rename from frame/example-offchain-worker/README.md
rename to frame/examples/offchain-worker/README.md
index 5299027f39250..587431c92c0ed 100644
--- a/frame/example-offchain-worker/README.md
+++ b/frame/examples/offchain-worker/README.md
@@ -1,5 +1,5 @@
 
-# Offchain Worker Example Module
+# Offchain Worker Example Pallet
 
 The Offchain Worker Example: A simple pallet demonstrating
 concepts, APIs and structures common to most offchain workers.
@@ -11,6 +11,8 @@ documentation.
 - [`Call`](./enum.Call.html)
 - [`Module`](./struct.Module.html)
 
+**This pallet serves as an example showcasing Substrate off-chain workers and is not meant to be
+used in production.**
 
 ## Overview
 
diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs
similarity index 99%
rename from frame/example-offchain-worker/src/lib.rs
rename to frame/examples/offchain-worker/src/lib.rs
index 9b63ffa663ee2..e5f2e00d9a344 100644
--- a/frame/example-offchain-worker/src/lib.rs
+++ b/frame/examples/offchain-worker/src/lib.rs
@@ -28,6 +28,8 @@
 //! - [`Call`]
 //! - [`Pallet`]
 //!
+//! **This pallet serves as an example showcasing Substrate off-chain workers and is not meant to
+//! be used in production.**
 //!
 //! ## Overview
 //!
@@ -40,6 +42,7 @@
 //! Additional logic in OCW is put in place to prevent spamming the network with both signed
 //! and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only
 //! one unsigned transaction floating in the network.
+
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use codec::{Decode, Encode};
diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs
similarity index 100%
rename from frame/example-offchain-worker/src/tests.rs
rename to frame/examples/offchain-worker/src/tests.rs
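The pallet moved here is built around FRAME's `offchain_worker` hook. A minimal sketch of that entry point, with the body reduced to illustrative logging rather than this pallet's actual logic (`Config` and `Pallet` assumed from the pallet module):

```rust
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
	/// Runs off-chain after every block import; any result must be sent
	/// back on-chain via a signed or unsigned transaction.
	fn offchain_worker(block_number: T::BlockNumber) {
		log::info!("off-chain worker running at block {:?}", block_number);
	}
}
```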
diff --git a/frame/example-parallel/Cargo.toml b/frame/examples/parallel/Cargo.toml
similarity index 84%
rename from frame/example-parallel/Cargo.toml
rename to frame/examples/parallel/Cargo.toml
index 169db35e65f1c..cf7f46b232247 100644
--- a/frame/example-parallel/Cargo.toml
+++ b/frame/examples/parallel/Cargo.toml
@@ -14,13 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"]
 [dependencies]
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" }
-frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
-sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" }
-sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" }
-sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../primitives/tasks" }
+
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" }
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
+sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" }
 
 [features]
 default = ["std"]
diff --git a/frame/examples/parallel/README.md b/frame/examples/parallel/README.md
new file mode 100644
index 0000000000000..44b39a41507db
--- /dev/null
+++ b/frame/examples/parallel/README.md
@@ -0,0 +1,7 @@
+
+# Parallel Tasks Example Pallet
+
+This example pallet demonstrates parallelizing validation of the enlisted participants (see
+`enlist_participants` dispatch).
+
+**This pallet serves as an example and is not meant to be used in production.**
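As a rough sketch of the pattern this pallet demonstrates, assuming the `sp_tasks` API this crate depends on (`spawn` a runtime-side task per payload, then `join` the results); the worker function and data layout here are hypothetical:

```rust
/// Hypothetical worker: validates one participant's opaque payload.
fn validate_participant(data: Vec<u8>) -> Vec<u8> {
	// Real code would decode and check `data` here.
	data
}

/// Fan out one task per payload, then join all results in order.
fn validate_all(payloads: Vec<Vec<u8>>) -> Vec<Vec<u8>> {
	let handles: Vec<_> = payloads
		.into_iter()
		.map(|p| sp_tasks::spawn(validate_participant, p))
		.collect();
	handles.into_iter().map(|h| h.join()).collect()
}
```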
diff --git a/frame/example-parallel/src/lib.rs b/frame/examples/parallel/src/lib.rs
similarity index 95%
rename from frame/example-parallel/src/lib.rs
rename to frame/examples/parallel/src/lib.rs
index 9d191525f631e..51e022bed08b3 100644
--- a/frame/example-parallel/src/lib.rs
+++ b/frame/examples/parallel/src/lib.rs
@@ -15,10 +15,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Parallel tasks example
+//! # Parallel Tasks Example Pallet
 //!
-//! This example pallet parallelizes validation of the enlisted participants
+//! This example pallet demonstrates parallelizing validation of the enlisted participants
 //! (see `enlist_participants` dispatch).
+//!
+//! **This pallet serves as an example and is not meant to be used in production.**
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
diff --git a/frame/example-parallel/src/tests.rs b/frame/examples/parallel/src/tests.rs
similarity index 100%
rename from frame/example-parallel/src/tests.rs
rename to frame/examples/parallel/src/tests.rs

From 6ec26dfc9268ab0a43fa41db84386261ed000d9b Mon Sep 17 00:00:00 2001
From: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
Date: Wed, 10 Nov 2021 09:33:14 +0000
Subject: [PATCH 075/162] use CountedMap in pallet-bags-list (#10179)

* use CountedMap in pallet-bags-list

* Fix build

* Update frame/bags-list/src/list/mod.rs

Co-authored-by: Keith Yeung 

* add a check as well

Co-authored-by: Keith Yeung 
---
 bin/node/runtime/src/lib.rs                   |  2 +-
 frame/bags-list/Cargo.toml                    |  5 +-
 frame/bags-list/src/lib.rs                    | 12 ++---
 frame/bags-list/src/list/mod.rs               | 33 +++++--------
 frame/bags-list/src/list/tests.rs             | 23 ++++++---
 frame/bags-list/src/migrations.rs             | 49 +++++++++++++++++++
 frame/bags-list/src/tests.rs                  |  2 +-
 .../support/src/storage/types/counted_map.rs  | 12 +++--
 8 files changed, 94 insertions(+), 44 deletions(-)
 create mode 100644 frame/bags-list/src/migrations.rs

diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 570abe53ed01f..6d04ca8fdca87 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -1330,7 +1330,7 @@ pub type Executive = frame_executive::Executive<
 	frame_system::ChainContext<Runtime>,
 	Runtime,
 	AllPallets,
-	(),
+	pallet_bags_list::migrations::CheckCounterPrefix<Runtime>,
 >;
 
 /// MMR helper types.
diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml
index fa47b9bad5692..6d4cf2363c4f7 100644
--- a/frame/bags-list/Cargo.toml
+++ b/frame/bags-list/Cargo.toml
@@ -64,9 +64,10 @@ runtime-benchmarks = [
 	"frame-election-provider-support/runtime-benchmarks",
 ]
 fuzz = [
-  "sp-core",
+	"sp-core",
 	"sp-io",
-  "pallet-balances",
+	"pallet-balances",
 	"sp-tracing",
 ]
+try-runtime = [ "frame-support/try-runtime" ]
 
diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs
index b7f96799e459f..8d74ecc9bd2d1 100644
--- a/frame/bags-list/src/lib.rs
+++ b/frame/bags-list/src/lib.rs
@@ -59,6 +59,7 @@ use sp_std::prelude::*;
 mod benchmarks;
 
 mod list;
+pub mod migrations;
 #[cfg(any(test, feature = "fuzz"))]
 pub mod mock;
 #[cfg(test)]
@@ -151,17 +152,12 @@ pub mod pallet {
 		type BagThresholds: Get<&'static [VoteWeight]>;
 	}
 
-	/// How many ids are registered.
-	// NOTE: This is merely a counter for `ListNodes`. It should someday be replaced by the
-	// `CountedMap` storage.
-	#[pallet::storage]
-	pub(crate) type CounterForListNodes<T: Config> = StorageValue<_, u32, ValueQuery>;
-
 	/// A single node, within some bag.
 	///
 	/// Nodes store links forward and back within their respective bags.
 	#[pallet::storage]
-	pub(crate) type ListNodes<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, list::Node<T>>;
+	pub(crate) type ListNodes<T: Config> =
+		CountedStorageMap<_, Twox64Concat, T::AccountId, list::Node<T>>;
 
 	/// A bag stored in storage.
 	///
@@ -240,7 +236,7 @@ impl SortedListProvider for Pallet {
 	}
 
 	fn count() -> u32 {
-		CounterForListNodes::<T>::get()
+		ListNodes::<T>::count()
 	}
 
 	fn contains(id: &T::AccountId) -> bool {
diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs
index 4efc3163816ff..df966eea80cee 100644
--- a/frame/bags-list/src/list/mod.rs
+++ b/frame/bags-list/src/list/mod.rs
@@ -18,7 +18,7 @@
 //! Implementation of a "bags list": a semi-sorted list where ordering granularity is dictated by
 //! configurable thresholds that delineate the boundaries of bags. It uses a pattern of composite
 //! data structures, where multiple storage items are masked by one outer API. See [`ListNodes`],
-//! [`CounterForListNodes`] and [`ListBags`] for more information.
+//! [`ListBags`] for more information.
 //!
 //! The outer API of this module is the [`List`] struct. It wraps all acceptable operations on top
 //! of the aggregate linked list. All operations with the bags list should happen through this
@@ -77,17 +77,18 @@ pub struct List(PhantomData);
 
 impl<T: Config> List<T> {
 	/// Remove all data associated with the list from storage. Parameter `items` is the number of
-	/// items to clear from the list. WARNING: `None` will clear all items and should generally not
-	/// be used in production as it could lead to an infinite number of storage accesses.
+	/// items to clear from the list.
+	///
+	/// ## WARNING
+	///
+	/// `None` will clear all items and should generally not be used in production as it could lead
+	/// to a very large number of storage accesses.
 	pub(crate) fn clear(maybe_count: Option<u32>) -> u32 {
 		crate::ListBags::<T>::remove_all(maybe_count);
+		let pre = crate::ListNodes::<T>::count();
 		crate::ListNodes::<T>::remove_all(maybe_count);
-		if let Some(count) = maybe_count {
-			crate::CounterForListNodes::<T>::mutate(|items| *items - count);
-			count
-		} else {
-			crate::CounterForListNodes::<T>::take()
-		}
+		let post = crate::ListNodes::<T>::count();
+		pre.saturating_sub(post)
 	}
 
 	/// Regenerate all of the data from the given ids.
@@ -274,17 +275,13 @@ impl List {
 		// new inserts are always the tail, so we must write the bag.
 		bag.put();
 
-		crate::CounterForListNodes::<T>::mutate(|prev_count| {
-			*prev_count = prev_count.saturating_add(1)
-		});
-
 		crate::log!(
 			debug,
 			"inserted {:?} with weight {} into bag {:?}, new count is {}",
 			id,
 			weight,
 			bag_weight,
-			crate::CounterForListNodes::<T>::get(),
+			crate::ListNodes::<T>::count(),
 		);
 
 		Ok(())
@@ -331,10 +328,6 @@ impl List {
 			bag.put();
 		}
 
-		crate::CounterForListNodes::<T>::mutate(|prev_count| {
-			*prev_count = prev_count.saturating_sub(count)
-		});
-
 		count
 	}
 
@@ -390,7 +383,7 @@ impl List {
 	/// is being used, after all other staking data (such as counter) has been updated. It checks:
 	///
 	/// * there are no duplicate ids,
-	/// * length of this list is in sync with `CounterForListNodes`,
+	/// * length of this list is in sync with `ListNodes::count()`,
 	/// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure
 	/// all bags and nodes are checked per *any* update to `List`.
 	#[cfg(feature = "std")]
@@ -403,7 +396,7 @@ impl List {
 		);
 
 		let iter_count = Self::iter().count() as u32;
-		let stored_count = crate::CounterForListNodes::<T>::get();
+		let stored_count = crate::ListNodes::<T>::count();
 		let nodes_count = crate::ListNodes::<T>::iter().count() as u32;
 		ensure!(iter_count == stored_count, "iter_count != stored_count");
 		ensure!(stored_count == nodes_count, "stored_count != nodes_count");
diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs
index 14802bac9d1d8..1c345df9a2fbd 100644
--- a/frame/bags-list/src/list/tests.rs
+++ b/frame/bags-list/src/list/tests.rs
@@ -18,7 +18,7 @@
 use super::*;
 use crate::{
 	mock::{test_utils::*, *},
-	CounterForListNodes, ListBags, ListNodes,
+	ListBags, ListNodes,
 };
 use frame_election_provider_support::SortedListProvider;
 use frame_support::{assert_ok, assert_storage_noop};
@@ -29,7 +29,7 @@ fn basic_setup_works() {
 		// syntactic sugar to create a raw node
 		let node = |id, prev, next, bag_upper| Node::<Runtime> { id, prev, next, bag_upper };
 
-		assert_eq!(CounterForListNodes::<Runtime>::get(), 4);
+		assert_eq!(ListNodes::<Runtime>::count(), 4);
 		assert_eq!(ListNodes::<Runtime>::iter().count(), 4);
 		assert_eq!(ListBags::<Runtime>::iter().count(), 2);
 
@@ -249,10 +249,10 @@ mod list {
 
 	#[test]
 	fn remove_works() {
-		use crate::{CounterForListNodes, ListBags, ListNodes};
+		use crate::{ListBags, ListNodes};
 		let ensure_left = |id, counter| {
 			assert!(!ListNodes::<Runtime>::contains_key(id));
-			assert_eq!(CounterForListNodes::<Runtime>::get(), counter);
+			assert_eq!(ListNodes::<Runtime>::count(), counter);
 			assert_eq!(ListNodes::<Runtime>::iter().count() as u32, counter);
 		};
 
@@ -357,10 +357,19 @@ mod list {
 			assert_eq!(List::::sanity_check(), Err("duplicate identified"));
 		});
 
-		// ensure count is in sync with `CounterForListNodes`.
+		// ensure count is in sync with `ListNodes::count()`.
 		ExtBuilder::default().build_and_execute_no_post_check(|| {
-			crate::CounterForListNodes::<Runtime>::mutate(|counter| *counter += 1);
-			assert_eq!(crate::CounterForListNodes::<Runtime>::get(), 5);
+			assert_eq!(crate::ListNodes::<Runtime>::count(), 4);
+			// we do some wacky stuff here to get access to the counter, since it is (reasonably)
+			// not exposed as mutable in any sense.
+			frame_support::generate_storage_alias!(
+				BagsList,
+				CounterForListNodes
+				=> Value<u32, frame_support::pallet_prelude::ValueQuery>
+			);
+			CounterForListNodes::mutate(|counter| *counter += 1);
+			assert_eq!(crate::ListNodes::<Runtime>::count(), 5);
+
 			assert_eq!(List::<Runtime>::sanity_check(), Err("iter_count != stored_count"));
 		});
 	}
diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs
new file mode 100644
index 0000000000000..8c907539c05f1
--- /dev/null
+++ b/frame/bags-list/src/migrations.rs
@@ -0,0 +1,49 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The migrations of this pallet.
+
+use frame_support::traits::OnRuntimeUpgrade;
+
+/// A struct that does no migration, but only checks that the counter prefix exists and is correct.
+pub struct CheckCounterPrefix<T: crate::Config>(sp_std::marker::PhantomData<T>);
+impl<T: crate::Config> OnRuntimeUpgrade for CheckCounterPrefix<T> {
+	fn on_runtime_upgrade() -> frame_support::weights::Weight {
+		0
+	}
+
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<(), &'static str> {
+		use frame_support::ensure;
+		// The old explicit storage item.
+		frame_support::generate_storage_alias!(BagsList, CounterForListNodes => Value<u32>);
+
+		// ensure that a value exists in the counter struct.
+		ensure!(
+			crate::ListNodes::<T>::count() == CounterForListNodes::get().unwrap(),
+			"wrong list node counter"
+		);
+
+		crate::log!(
+			info,
+			"checked bags-list prefix to be correct and have {} nodes",
+			crate::ListNodes::<T>::count()
+		);
+
+		Ok(())
+	}
+}
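As the `bin/node/runtime` hunk at the top of this patch shows, this check is wired into the custom-migrations slot of `Executive`. A sketch of composing it with further migrations, assuming the runtime's type is `Runtime`:

```rust
// The last Executive type parameter accepts any `OnRuntimeUpgrade` implementor;
// tuples run their members in order, so checks and migrations compose.
type Migrations = (
	pallet_bags_list::migrations::CheckCounterPrefix<Runtime>,
	// further `OnRuntimeUpgrade` implementors would follow here
);
```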
diff --git a/frame/bags-list/src/tests.rs b/frame/bags-list/src/tests.rs
index e94017730668b..270d25855ccd4 100644
--- a/frame/bags-list/src/tests.rs
+++ b/frame/bags-list/src/tests.rs
@@ -340,7 +340,7 @@ mod sorted_list_provider {
 		let ensure_left = |id, counter| {
 			assert!(!ListNodes::<Runtime>::contains_key(id));
 			assert_eq!(BagsList::count(), counter);
-			assert_eq!(CounterForListNodes::<Runtime>::get(), counter);
+			assert_eq!(ListNodes::<Runtime>::count(), counter);
 			assert_eq!(ListNodes::<Runtime>::iter().count() as u32, counter);
 		};
 
diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs
index 0860a4ed541c6..51edf10890267 100644
--- a/frame/support/src/storage/types/counted_map.rs
+++ b/frame/support/src/storage/types/counted_map.rs
@@ -31,6 +31,7 @@ use crate::{
 	Never,
 };
 use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref};
+use sp_arithmetic::traits::Bounded;
 use sp_runtime::traits::Saturating;
 use sp_std::prelude::*;
 
@@ -262,9 +263,10 @@ where
 	}
 
 	/// Remove all values of the storage.
-	pub fn remove_all() {
-		CounterFor::<Prefix>::set(0u32);
-		<Self as MapWrapper>::Map::remove_all(None);
+	pub fn remove_all(maybe_limit: Option<u32>) {
+		let leftover = Self::count().saturating_sub(maybe_limit.unwrap_or_else(Bounded::max_value));
+		CounterFor::<Prefix>::set(leftover);
+		<Self as MapWrapper>::Map::remove_all(maybe_limit);
 	}
 
 	/// Iter over all values of the storage.
@@ -676,7 +678,7 @@ mod test {
 			assert_eq!(A::count(), 2);
 
 			// Remove all.
-			A::remove_all();
+			A::remove_all(None);
 
 			assert_eq!(A::count(), 0);
 			assert_eq!(A::initialize_counter(), 0);
@@ -907,7 +909,7 @@ mod test {
 			assert_eq!(B::count(), 2);
 
 			// Remove all.
-			B::remove_all();
+			B::remove_all(None);
 
 			assert_eq!(B::count(), 0);
 			assert_eq!(B::initialize_counter(), 0);

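To summarise the storage swap driving this patch: `CountedStorageMap` maintains its counter transparently on insert and remove, so callers read `count()` instead of maintaining a separate `StorageValue`. A minimal sketch under an assumed pallet `Config` (names illustrative):

```rust
#[pallet::storage]
pub type MyMap<T: Config> = CountedStorageMap<_, Twox64Concat, T::AccountId, u32>;

fn demo<T: Config>(who: T::AccountId) {
	MyMap::<T>::insert(&who, 7);
	// The counter is kept in sync automatically.
	assert_eq!(MyMap::<T>::count(), 1);
	MyMap::<T>::remove(&who);
	assert_eq!(MyMap::<T>::count(), 0);
}
```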
From 755569d202b4007179cc250279bad55df45b5f7d Mon Sep 17 00:00:00 2001
From: Denis Pisarev 
Date: Wed, 10 Nov 2021 11:39:30 +0100
Subject: [PATCH 076/162] CI: build docs with deps (#9884)

---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 210bb447e4c7b..8826b29de9786 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -661,7 +661,7 @@ build-rustdoc:
     - ./crate-docs/
   script:
     # FIXME: it fails with `RUSTDOCFLAGS="-Dwarnings"`
-    - time cargo +nightly doc --no-deps --workspace --all-features --verbose
+    - time cargo +nightly doc --workspace --all-features --verbose
     - rm -f ./target/doc/.lock
     - mv ./target/doc ./crate-docs
     # FIXME: remove me after CI image gets nonroot

From 3374a73c9824638b8270062b104906a714ea14f9 Mon Sep 17 00:00:00 2001
From: Benjamin Kampmann 
Date: Wed, 10 Nov 2021 17:30:24 +0100
Subject: [PATCH 077/162] Intend to reactivate cargo-unleash check (#10167)

* Intend to reactivate cargo-unleash check

It appears the bug it was deactivated for was resolved a while ago. Trying to reactivate the checks.

* adding missing cargo.toml metadata for BEEFY crates

* fix wrong version reference

* matching up versions

* disable faulty cache

* switching more versions to prerelease

* Revert "disable faulty cache"

This reverts commit 411a12ae444a9695a8bfea4458a868438d870b06.

* bump minor of sc-allocator to fix already-published-issue

* fixup another pre-released dependency problem

* temp switch to latest unleash

* fixing dependency version and features

* prometheus endpoint has also been changed

* fixing proposer metrics versioning

* fixing hex feature for beefy

* fix generate-bags feature selection

* fixup Cargo.lock

* upgrade prometheus dependencies

* missed one

* switch to latest release
---
 .gitlab-ci.yml                                | 28 ++++++-------
 Cargo.lock                                    | 42 +++++++++----------
 client/allocator/Cargo.toml                   |  2 +-
 client/api/Cargo.toml                         |  2 +-
 client/authority-discovery/Cargo.toml         |  2 +-
 client/basic-authorship/Cargo.toml            |  4 +-
 client/beefy/Cargo.toml                       |  4 +-
 client/beefy/rpc/Cargo.toml                   |  2 +
 client/cli/Cargo.toml                         |  2 +-
 client/consensus/aura/Cargo.toml              |  2 +-
 client/consensus/babe/Cargo.toml              |  2 +-
 client/consensus/common/Cargo.toml            |  2 +-
 client/consensus/manual-seal/Cargo.toml       |  2 +-
 client/consensus/pow/Cargo.toml               |  2 +-
 client/executor/Cargo.toml                    |  4 +-
 client/executor/common/Cargo.toml             |  6 +--
 client/executor/wasmi/Cargo.toml              |  2 +-
 client/executor/wasmtime/Cargo.toml           |  2 +-
 client/finality-grandpa/Cargo.toml            |  2 +-
 client/network-gossip/Cargo.toml              |  2 +-
 client/network/Cargo.toml                     |  2 +-
 client/offchain/Cargo.toml                    |  6 +--
 client/proposer-metrics/Cargo.toml            |  4 +-
 client/rpc-servers/Cargo.toml                 |  2 +-
 client/service/Cargo.toml                     |  2 +-
 client/service/test/Cargo.toml                |  2 +-
 client/transaction-pool/Cargo.toml            |  2 +-
 client/utils/Cargo.toml                       |  2 +-
 frame/beefy-mmr/Cargo.toml                    |  1 +
 frame/beefy-mmr/primitives/Cargo.toml         |  3 +-
 frame/beefy/Cargo.toml                        |  2 +
 primitives/arithmetic/Cargo.toml              |  2 +-
 primitives/beefy/Cargo.toml                   |  2 +
 primitives/core/Cargo.toml                    |  4 +-
 primitives/debug-derive/Cargo.toml            |  2 +-
 primitives/maybe-compressed-blob/Cargo.toml   |  2 +-
 primitives/panic-handler/Cargo.toml           |  2 +-
 primitives/serializer/Cargo.toml              |  2 +-
 primitives/state-machine/Cargo.toml           |  2 +-
 primitives/storage/Cargo.toml                 |  2 +-
 utils/frame/generate-bags/Cargo.toml          | 10 ++---
 .../generate-bags/node-runtime/Cargo.toml     |  2 +-
 utils/prometheus/Cargo.toml                   |  8 ++--
 utils/wasm-builder/Cargo.toml                 |  2 +-
 44 files changed, 97 insertions(+), 89 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8826b29de9786..aa275061088d6 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -40,7 +40,7 @@ variables:                         &default-vars
   ARCH:                            "x86_64"
   CI_IMAGE:                        "paritytech/ci-linux:production"
   # FIXME set to release
-  CARGO_UNLEASH_INSTALL_PARAMS:    "--version 1.0.0-alpha.12"
+  CARGO_UNLEASH_INSTALL_PARAMS:    "--version 1.0.0-alpha.13"
   CARGO_UNLEASH_PKG_DEF:           "--skip node node-* pallet-template pallet-example-* subkey chain-spec-builder"
   VAULT_SERVER_URL:                "https://vault.parity-mgmt-vault.parity.io"
   VAULT_AUTH_PATH:                 "gitlab-parity-io-jwt"
@@ -455,20 +455,20 @@ test-linux-stable:                 &test-linux
     - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout
     - sccache -s
 
-#unleash-check:
-  #stage:                           test
-  #<<:                              *docker-env
-  #<<:                              *test-refs-no-trigger
-  #script:
-    #- cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
-    #- cargo unleash de-dev-deps
+unleash-check:
+  stage:                           test
+  <<:                              *docker-env
+  <<:                              *test-refs-no-trigger
+  script:
+    - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
+    - cargo unleash de-dev-deps
     # Reuse build artifacts when running checks (cuts down check time by 3x)
     # TODO: Implement this optimization in cargo-unleash rather than here
-    #- mkdir -p target/unleash
-    #- export CARGO_TARGET_DIR=target/unleash
-    #- cargo unleash check ${CARGO_UNLEASH_PKG_DEF}
+    - mkdir -p target/unleash
+    - export CARGO_TARGET_DIR=target/unleash
+    - cargo unleash check ${CARGO_UNLEASH_PKG_DEF}
   # FIXME: this job must not fail, or unleash-to-crates-io will publish broken stuff
-  #allow_failure:                   true
+  allow_failure:                   true
 
 test-frame-examples-compile-to-wasm:
   # into one job
@@ -872,9 +872,7 @@ unleash-to-crates-io:
   <<:                              *vault-secrets
   rules:
     - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/
-    # FIXME: wait until https://github.com/paritytech/cargo-unleash/issues/50 is fixed, also
-    # remove allow_failure: true on the check job
-    # - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/              # i.e. v1.0, v2.1rc1
+    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/              # i.e. v1.0, v2.1rc1
   script:
     - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
     - cargo unleash em-dragons --no-check --owner github:paritytech:core-devs ${CARGO_UNLEASH_PKG_DEF}
diff --git a/Cargo.lock b/Cargo.lock
index a7bb471f90d8c..69cd80e292f88 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2400,7 +2400,7 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
 
 [[package]]
 name = "generate-bags"
-version = "3.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "chrono",
  "frame-election-provider-support",
@@ -2698,9 +2698,9 @@ dependencies = [
 
 [[package]]
 name = "httparse"
-version = "1.4.1"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68"
+checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503"
 
 [[package]]
 name = "httpdate"
@@ -2744,9 +2744,9 @@ dependencies = [
 
 [[package]]
 name = "hyper"
-version = "0.14.11"
+version = "0.14.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b61cf2d1aebcf6e6352c97b81dc2244ca29194be1b276f5d8ad5c6330fffb11"
+checksum = "2b91bb1f221b6ea1f1e4371216b70f40748774c2fb5971b450c07773fb92d26b"
 dependencies = [
  "bytes 1.0.1",
  "futures-channel",
@@ -2773,7 +2773,7 @@ checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64"
 dependencies = [
  "ct-logs",
  "futures-util",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "log 0.4.14",
  "rustls",
  "rustls-native-certs",
@@ -2789,7 +2789,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
 dependencies = [
  "bytes 1.0.1",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "native-tls",
  "tokio",
  "tokio-native-tls",
@@ -3012,7 +3012,7 @@ checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a"
 dependencies = [
  "derive_more",
  "futures 0.3.16",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "hyper-tls",
  "jsonrpc-core",
  "jsonrpc-pubsub",
@@ -3068,7 +3068,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff"
 dependencies = [
  "futures 0.3.16",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "jsonrpc-core",
  "jsonrpc-server-utils",
  "log 0.4.14",
@@ -3164,7 +3164,7 @@ dependencies = [
  "beef",
  "futures-channel",
  "futures-util",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "log 0.4.14",
  "serde",
  "serde_json",
@@ -6815,9 +6815,9 @@ dependencies = [
 
 [[package]]
 name = "prometheus"
-version = "0.12.0"
+version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c"
+checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504"
 dependencies = [
  "cfg-if 1.0.0",
  "fnv",
@@ -7584,7 +7584,7 @@ dependencies = [
 
 [[package]]
 name = "sc-allocator"
-version = "4.0.0-dev"
+version = "4.1.0-dev"
 dependencies = [
  "log 0.4.14",
  "sp-core",
@@ -8355,7 +8355,7 @@ dependencies = [
  "futures 0.3.16",
  "futures-timer 3.0.2",
  "hex",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "hyper-rustls",
  "lazy_static",
  "log 0.4.14",
@@ -8397,7 +8397,7 @@ dependencies = [
 
 [[package]]
 name = "sc-proposer-metrics"
-version = "0.9.0"
+version = "0.10.0-dev"
 dependencies = [
  "log 0.4.14",
  "substrate-prometheus-endpoint",
@@ -9496,7 +9496,7 @@ dependencies = [
 
 [[package]]
 name = "sp-debug-derive"
-version = "3.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -9597,7 +9597,7 @@ dependencies = [
 
 [[package]]
 name = "sp-maybe-compressed-blob"
-version = "4.0.0-dev"
+version = "4.1.0-dev"
 dependencies = [
  "zstd",
 ]
@@ -9657,7 +9657,7 @@ dependencies = [
 
 [[package]]
 name = "sp-panic-handler"
-version = "3.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "backtrace",
  "lazy_static",
@@ -9788,7 +9788,7 @@ dependencies = [
 
 [[package]]
 name = "sp-serializer"
-version = "3.0.0"
+version = "4.0.0-dev"
 dependencies = [
  "serde",
  "serde_json",
@@ -10171,12 +10171,12 @@ dependencies = [
 
 [[package]]
 name = "substrate-prometheus-endpoint"
-version = "0.9.0"
+version = "0.10.0-dev"
 dependencies = [
  "async-std",
  "derive_more",
  "futures-util",
- "hyper 0.14.11",
+ "hyper 0.14.14",
  "log 0.4.14",
  "prometheus",
  "tokio",
diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml
index 2b37c192c6e3e..6d324b09acde5 100644
--- a/client/allocator/Cargo.toml
+++ b/client/allocator/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sc-allocator"
-version = "4.0.0-dev"
+version = "4.1.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml
index 7b2952552a3d0..431d6e2fb0157 100644
--- a/client/api/Cargo.toml
+++ b/client/api/Cargo.toml
@@ -36,7 +36,7 @@ sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-mach
 sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" }
 sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" }
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" }
 
 [dev-dependencies]
 sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" }
diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml
index dc4b929756810..b1d9d4ebd3935 100644
--- a/client/authority-discovery/Cargo.toml
+++ b/client/authority-discovery/Cargo.toml
@@ -25,7 +25,7 @@ futures-timer = "3.0.1"
 ip_network = "0.4.0"
 libp2p = { version = "0.39.1", default-features = false, features = ["kad"] }
 log = "0.4.8"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" }
 prost = "0.8"
 rand = "0.7.2"
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml
index 96ab698f36213..4bfc3ca50c83f 100644
--- a/client/basic-authorship/Cargo.toml
+++ b/client/basic-authorship/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0" }
 futures = "0.3.9"
 futures-timer = "3.0.1"
 log = "0.4.8"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"}
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"}
 sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
 sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
 sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
@@ -28,7 +28,7 @@ sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" }
 sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" }
 sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../client/transaction-pool/api" }
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
-sc-proposer-metrics = { version = "0.9.0", path = "../proposer-metrics" }
+sc-proposer-metrics = { version = "0.10.0-dev", path = "../proposer-metrics" }
 
 [dev-dependencies]
 sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" }
diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml
index 60f9fde030800..96e5bc4ffbf31 100644
--- a/client/beefy/Cargo.toml
+++ b/client/beefy/Cargo.toml
@@ -4,6 +4,8 @@ version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+repository = "https://github.com/paritytech/substrate"
+description = "BEEFY Client gadget for substrate"
 
 [dependencies]
 fnv = "1.0.6"
@@ -14,7 +16,7 @@ thiserror = "1.0"
 wasm-timer = "0.2.5"
 
 codec = { version = "2.2.0", package = "parity-scale-codec", features = ["derive"] }
-prometheus = { version = "0.9.0", package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
+prometheus = { version = "0.10.0-dev", package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" }
 
 sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
 sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" }
diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml
index 47fd2b740370c..594736841ed27 100644
--- a/client/beefy/rpc/Cargo.toml
+++ b/client/beefy/rpc/Cargo.toml
@@ -4,6 +4,8 @@ version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+repository = "https://github.com/paritytech/substrate"
+description = "RPC for the BEEFY Client gadget for substrate"
 
 [dependencies]
 futures = "0.3.16"
diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml
index 4b6ef3bab9505..6107206be34e8 100644
--- a/client/cli/Cargo.toml
+++ b/client/cli/Cargo.toml
@@ -25,7 +25,7 @@ rand = "0.7.3"
 tiny-bip39 = "0.8.2"
 serde_json = "1.0.68"
 sc-keystore = { version = "4.0.0-dev", path = "../keystore" }
-sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" }
+sp-panic-handler = { version = "4.0.0-dev", path = "../../primitives/panic-handler" }
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
 sc-network = { version = "0.10.0-dev", path = "../network" }
diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml
index 62d7b3d5327ad..152c40f78f9d4 100644
--- a/client/consensus/aura/Cargo.toml
+++ b/client/consensus/aura/Cargo.toml
@@ -33,7 +33,7 @@ sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
 sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" }
 sc-telemetry = { version = "4.0.0-dev", path = "../../telemetry" }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev" }
 async-trait = "0.1.50"
 # We enable it only for web-wasm check
 # See https://docs.rs/getrandom/0.2.1/getrandom/#webassembly-support
diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml
index 8430ca39c9cb7..7945ecc4ec793 100644
--- a/client/consensus/babe/Cargo.toml
+++ b/client/consensus/babe/Cargo.toml
@@ -42,7 +42,7 @@ sp-consensus-vrf = { version = "0.10.0-dev", path = "../../../primitives/consens
 sc-consensus-slots = { version = "0.10.0-dev", path = "../slots" }
 sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
 fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev" }
 futures = "0.3.9"
 parking_lot = "0.11.1"
 log = "0.4.8"
diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml
index d63d124ed60e5..3f8380d3f81bc 100644
--- a/client/consensus/common/Cargo.toml
+++ b/client/consensus/common/Cargo.toml
@@ -28,7 +28,7 @@ sc-utils = { version = "4.0.0-dev", path = "../../utils" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 parking_lot = "0.11.1"
 serde = { version = "1.0", features = ["derive"] }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev" }
 async-trait = "0.1.42"
 
 [dev-dependencies]
diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml
index a662ebf01011d..a7679f53ea9e8 100644
--- a/client/consensus/manual-seal/Cargo.toml
+++ b/client/consensus/manual-seal/Cargo.toml
@@ -42,7 +42,7 @@ sp-api = { path = "../../../primitives/api", version = "4.0.0-dev" }
 sc-transaction-pool-api = { path = "../../../client/transaction-pool/api", version = "4.0.0-dev" }
 sp-timestamp = { path = "../../../primitives/timestamp", version = "4.0.0-dev" }
 
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev" }
 
 [dev-dependencies]
 tokio = { version = "1.10.0", features = ["rt-multi-thread", "macros"] }
diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml
index bc7c01181e41b..841631fce7cc9 100644
--- a/client/consensus/pow/Cargo.toml
+++ b/client/consensus/pow/Cargo.toml
@@ -29,5 +29,5 @@ futures = "0.3.16"
 futures-timer = "3.0.1"
 parking_lot = "0.11.1"
 derive_more = "0.99.2"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"}
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev"}
 async-trait = "0.1.50"
diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml
index ef66d8072b537..54c4a91d72805 100644
--- a/client/executor/Cargo.toml
+++ b/client/executor/Cargo.toml
@@ -20,7 +20,7 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
 sp-tasks = { version = "4.0.0-dev", path = "../../primitives/tasks" }
 sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" }
 sp-version = { version = "4.0.0-dev", path = "../../primitives/version" }
-sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" }
+sp-panic-handler = { version = "4.0.0-dev", path = "../../primitives/panic-handler" }
 wasmi = "0.9.1"
 lazy_static = "1.4.0"
 sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
@@ -42,7 +42,7 @@ sc-runtime-test = { version = "2.0.0", path = "runtime-test" }
 substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" }
 sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" }
 sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
-sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../primitives/maybe-compressed-blob" }
+sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" }
 sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
 tracing = "0.1.29"
 tracing-subscriber = "0.2.19"
diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml
index 7ac747bf967bd..2d9f4672768e9 100644
--- a/client/executor/common/Cargo.toml
+++ b/client/executor/common/Cargo.toml
@@ -19,10 +19,10 @@ pwasm-utils = "0.18.0"
 codec = { package = "parity-scale-codec", version = "2.0.0" }
 wasmi = "0.9.1"
 sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
-sc-allocator = { version = "4.0.0-dev", path = "../../allocator" }
+sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" }
-sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../../primitives/maybe-compressed-blob" }
-sp-serializer = { version = "3.0.0", path = "../../../primitives/serializer" }
+sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" }
+sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" }
 thiserror = "1.0.21"
 environmental = "1.1.3"
 
diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml
index 255a470b374da..871a8aa68b626 100644
--- a/client/executor/wasmi/Cargo.toml
+++ b/client/executor/wasmi/Cargo.toml
@@ -18,7 +18,7 @@ log = "0.4.8"
 wasmi = "0.9.1"
 codec = { package = "parity-scale-codec", version = "2.0.0" }
 sc-executor-common = { version = "0.10.0-dev", path = "../common" }
-sc-allocator = { version = "4.0.0-dev", path = "../../allocator" }
+sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" }
 sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" }
 sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml
index e52a53f71c06e..006415869c925 100644
--- a/client/executor/wasmtime/Cargo.toml
+++ b/client/executor/wasmtime/Cargo.toml
@@ -22,7 +22,7 @@ sc-executor-common = { version = "0.10.0-dev", path = "../common" }
 sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" }
 sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" }
 sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
-sc-allocator = { version = "4.0.0-dev", path = "../../allocator" }
+sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 wasmtime = { version = "0.31.0", default-features = false, features = [
     "cache",
     "cranelift",
diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml
index fd0b52cd1f70b..43511ea59f146 100644
--- a/client/finality-grandpa/Cargo.toml
+++ b/client/finality-grandpa/Cargo.toml
@@ -41,7 +41,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
 sc-network = { version = "0.10.0-dev", path = "../network" }
 sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" }
 sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" }
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
 finality-grandpa = { version = "0.14.4", features = ["derive-codec"] }
 async-trait = "0.1.50"
diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml
index e17d9601eafc3..e11cb8dbe85d6 100644
--- a/client/network-gossip/Cargo.toml
+++ b/client/network-gossip/Cargo.toml
@@ -20,7 +20,7 @@ futures-timer = "3.0.1"
 libp2p = { version = "0.39.1", default-features = false }
 log = "0.4.8"
 lru = "0.7.0"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" }
 sc-network = { version = "0.10.0-dev", path = "../network" }
 sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
 tracing = "0.1.29"
diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml
index 19ae5dd97e425..eb91dd145e549 100644
--- a/client/network/Cargo.toml
+++ b/client/network/Cargo.toml
@@ -40,7 +40,7 @@ lru = "0.7.0"
 log = "0.4.8"
 parking_lot = "0.11.1"
 pin-project = "1.0.8"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" }
 prost = "0.8"
 rand = "0.7.2"
 sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml
index ee697b32e1e4b..c93eec85888dc 100644
--- a/client/offchain/Cargo.toml
+++ b/client/offchain/Cargo.toml
@@ -17,8 +17,8 @@ bytes = "1.0"
 codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] }
 hex = "0.4"
 fnv = "1.0.6"
-futures = "0.3.9"
-futures-timer = "3.0.1"
+futures = "0.3.16"
+futures-timer = "3.0.2"
 log = "0.4.8"
 num_cpus = "1.10"
 parking_lot = "0.11.1"
@@ -31,7 +31,7 @@ sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" }
 sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" }
 sc-utils = { version = "4.0.0-dev", path = "../utils" }
 threadpool = "1.7"
-hyper = "0.14.11"
+hyper = { version = "0.14.14", features = ["stream"] }
 hyper-rustls = "0.22.1"
 once_cell = "1.8"
 
diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml
index 93c4cce93ca65..1a4b1fd4ce2c9 100644
--- a/client/proposer-metrics/Cargo.toml
+++ b/client/proposer-metrics/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sc-proposer-metrics"
-version = "0.9.0"
+version = "0.10.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
@@ -14,4 +14,4 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = "0.4.8"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"}
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"}
diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml
index b3f408cc59806..f0ae6172d6d9e 100644
--- a/client/rpc-servers/Cargo.toml
+++ b/client/rpc-servers/Cargo.toml
@@ -17,7 +17,7 @@ futures = "0.3.16"
 jsonrpc-core = "18.0.0"
 pubsub = { package = "jsonrpc-pubsub", version = "18.0.0" }
 log = "0.4.8"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"}
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"}
 serde_json = "1.0.68"
 tokio = "1.10"
 http = { package = "jsonrpc-http-server", version = "18.0.0" }
diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml
index 5c22c1e4fca1c..97b0f4e461c87 100644
--- a/client/service/Cargo.toml
+++ b/client/service/Cargo.toml
@@ -69,7 +69,7 @@ sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-build
 sc-informant = { version = "0.10.0-dev", path = "../informant" }
 sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" }
 sc-offchain = { version = "4.0.0-dev", path = "../offchain" }
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" }
 sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
 sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" }
 tracing = "0.1.29"
diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml
index 49eca272ac75c..03967db15f678 100644
--- a/client/service/test/Cargo.toml
+++ b/client/service/test/Cargo.toml
@@ -40,6 +40,6 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils
 sc-client-api = { version = "4.0.0-dev", path = "../../api" }
 sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" }
 sc-executor = { version = "0.10.0-dev", path = "../../executor" }
-sp-panic-handler = { version = "3.0.0", path = "../../../primitives/panic-handler" }
+sp-panic-handler = { version = "4.0.0-dev", path = "../../../primitives/panic-handler" }
 parity-scale-codec = "2.3.1"
 sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" }
diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml
index 6d05125002f2e..99f1f3788ec0e 100644
--- a/client/transaction-pool/Cargo.toml
+++ b/client/transaction-pool/Cargo.toml
@@ -20,7 +20,7 @@ intervalier = "0.4.0"
 log = "0.4.8"
 parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
 parking_lot = "0.11.1"
-prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"}
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"}
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
 sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml
index 6d04fd4e9acdf..827164b702c6f 100644
--- a/client/utils/Cargo.toml
+++ b/client/utils/Cargo.toml
@@ -12,7 +12,7 @@ readme = "README.md"
 [dependencies]
 futures = "0.3.9"
 lazy_static = "1.4.0"
-prometheus = { version = "0.12.0", default-features = false }
+prometheus = { version = "0.13.0", default-features = false }
 futures-timer = "3.0.2"
 
 [features]
diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml
index 0a72ee193b3a7..b99b6f7e9feaa 100644
--- a/frame/beefy-mmr/Cargo.toml
+++ b/frame/beefy-mmr/Cargo.toml
@@ -5,6 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2021"
 license = "Apache-2.0"
 description = "BEEFY + MMR runtime utilities"
+repository = "https://github.com/paritytech/substrate"
 
 [dependencies]
 hex = { version = "0.4", optional = true }
diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml
index 3669ba4c2286c..b54ac225e7818 100644
--- a/frame/beefy-mmr/primitives/Cargo.toml
+++ b/frame/beefy-mmr/primitives/Cargo.toml
@@ -4,6 +4,7 @@ version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
+repository = "https://github.com/paritytech/substrate"
 description = "A no-std/Substrate compatible library to construct binary merkle tree."
 
 [dependencies]
@@ -17,7 +18,7 @@ hex = "0.4"
 hex-literal = "0.3"
 
 [features]
-debug = ["hex", "log"]
+debug = ["hex", "hex/std", "log"]
 default = ["std", "debug", "keccak"]
 keccak = ["tiny-keccak"]
 std = []
diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml
index a8b516aac66ca..7e1cec9d438ea 100644
--- a/frame/beefy/Cargo.toml
+++ b/frame/beefy/Cargo.toml
@@ -4,6 +4,8 @@ version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
+repository = "https://github.com/paritytech/substrate"
+description = "BEEFY FRAME pallet"
 
 [dependencies]
 codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] }
diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml
index e6fa1759774ed..139a04180828c 100644
--- a/primitives/arithmetic/Cargo.toml
+++ b/primitives/arithmetic/Cargo.toml
@@ -24,7 +24,7 @@ static_assertions = "1.1.0"
 num-traits = { version = "0.2.8", default-features = false }
 sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
 serde = { version = "1.0.126", optional = true, features = ["derive"] }
-sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" }
+sp-debug-derive = { version = "4.0.0-dev", default-features = false, path = "../debug-derive" }
 
 [dev-dependencies]
 rand = "0.7.2"
diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml
index 83472f54c5135..23e98012027c7 100644
--- a/primitives/beefy/Cargo.toml
+++ b/primitives/beefy/Cargo.toml
@@ -4,6 +4,8 @@ version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
+repository = "https://github.com/paritytech/substrate"
+description = "Primitives for BEEFY protocol."
 
 [dependencies]
 codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] }
diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml
index 9948b2e968c26..9e31ba644c250 100644
--- a/primitives/core/Cargo.toml
+++ b/primitives/core/Cargo.toml
@@ -40,7 +40,7 @@ zeroize = { version = "1.4.2", default-features = false }
 secrecy = { version = "0.8.0", default-features = false }
 lazy_static = { version = "1.4.0", default-features = false, optional = true }
 parking_lot = { version = "0.11.1", optional = true }
-sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" }
+sp-debug-derive = { version = "4.0.0-dev", default-features = false, path = "../debug-derive" }
 sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" }
 sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" }
 parity-util-mem = { version = "0.10.0", default-features = false, features = [
@@ -72,7 +72,7 @@ ss58-registry = { version = "1.5.0", default-features = false }
 sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" }
 
 [dev-dependencies]
-sp-serializer = { version = "3.0.0", path = "../serializer" }
+sp-serializer = { version = "4.0.0-dev", path = "../serializer" }
 hex-literal = "0.3.3"
 rand = "0.7.2"
 criterion = "0.3.3"
diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml
index a2f77b7591fe1..49b8ac832cf0f 100644
--- a/primitives/debug-derive/Cargo.toml
+++ b/primitives/debug-derive/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-debug-derive"
-version = "3.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml
index 95ba9b3324127..dbed41571b149 100644
--- a/primitives/maybe-compressed-blob/Cargo.toml
+++ b/primitives/maybe-compressed-blob/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-maybe-compressed-blob"
-version = "4.0.0-dev"
+version = "4.1.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml
index 0845e175c2377..b7f3b6b5cb4da 100644
--- a/primitives/panic-handler/Cargo.toml
+++ b/primitives/panic-handler/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-panic-handler"
-version = "3.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml
index 5b2d499279a47..2b2acb8dbc373 100644
--- a/primitives/serializer/Cargo.toml
+++ b/primitives/serializer/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "sp-serializer"
-version = "3.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml
index 343151a50a927..c919d7fbef6ad 100644
--- a/primitives/state-machine/Cargo.toml
+++ b/primitives/state-machine/Cargo.toml
@@ -22,7 +22,7 @@ trie-db = { version = "0.22.6", default-features = false }
 trie-root = { version = "0.16.0", default-features = false }
 sp-trie = { version = "4.0.0-dev", path = "../trie", default-features = false }
 sp-core = { version = "4.0.0-dev", path = "../core", default-features = false }
-sp-panic-handler = { version = "3.0.0", path = "../panic-handler", optional = true }
+sp-panic-handler = { version = "4.0.0-dev", path = "../panic-handler", optional = true }
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
 num-traits = { version = "0.2.8", default-features = false }
 rand = { version = "0.7.2", optional = true }
diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml
index 2413c45a7312e..c9166a5bbba07 100644
--- a/primitives/storage/Cargo.toml
+++ b/primitives/storage/Cargo.toml
@@ -18,7 +18,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
 serde = { version = "1.0.126", optional = true, features = ["derive"] }
 impl-serde = { version = "0.3.1", optional = true }
 ref-cast = "1.0.0"
-sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" }
+sp-debug-derive = { version = "4.0.0-dev", default-features = false, path = "../debug-derive" }
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 
 [features]
diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml
index 03382878710b7..225584b69069e 100644
--- a/utils/frame/generate-bags/Cargo.toml
+++ b/utils/frame/generate-bags/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "generate-bags"
-version = "3.0.0"
+version = "4.0.0-dev"
 authors = ["Parity Technologies "]
 edition = "2021"
 license = "Apache-2.0"
@@ -11,13 +11,13 @@ readme = "README.md"
 
 [dependencies]
 # FRAME
-frame-support = { version = "4.0.0-dev", default-features = false, path = "../../../frame/support" }
+frame-support = { version = "4.0.0-dev", path = "../../../frame/support" }
 frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support", features = ["runtime-benchmarks"] }
-frame-system = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system" }
-pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" }
+frame-system = { version = "4.0.0-dev", path = "../../../frame/system" }
+pallet-staking = { version = "4.0.0-dev", path = "../../../frame/staking" }
 
 # primitives
-sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" }
+sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" }
 
 # third party
 chrono = { version = "0.4.19" }
diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml
index 5029e049361c9..b0256722f466c 100644
--- a/utils/frame/generate-bags/node-runtime/Cargo.toml
+++ b/utils/frame/generate-bags/node-runtime/Cargo.toml
@@ -11,7 +11,7 @@ readme = "README.md"
 
 [dependencies]
 node-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" }
-generate-bags = { version = "3.0.0", path = "../" }
+generate-bags = { version = "4.0.0-dev", path = "../" }
 
 # third-party
 structopt = "0.3.21"
diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml
index 78bd68ac9e3a5..390a1c733cbd7 100644
--- a/utils/prometheus/Cargo.toml
+++ b/utils/prometheus/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 description = "Endpoint to expose Prometheus metrics"
 name = "substrate-prometheus-endpoint"
-version = "0.9.0"
+version = "0.10.0-dev"
 license = "Apache-2.0"
 authors = ["Parity Technologies "]
 edition = "2021"
@@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = "0.4.8"
-prometheus = { version = "0.12.0", default-features = false }
+prometheus = { version = "0.13.0", default-features = false }
 futures-util = { version = "0.3.17", default-features = false, features = ["io"] }
 derive_more = "0.99"
 async-std = { version = "1.10.0", features = ["unstable"] }
 tokio = "1.10"
-hyper = { version = "0.14.11", default-features = false, features = ["http1", "server", "tcp"] }
+hyper = { version = "0.14.14", default-features = false, features = ["http1", "server", "tcp"] }
 
 [dev-dependencies]
-hyper = { version = "0.14.11", features = ["client"] }
+hyper = { version = "0.14.14", features = ["client"] }
 tokio = { version = "1.10", features = ["rt-multi-thread"] }
diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml
index d8802c432f55d..88318f4f0d54c 100644
--- a/utils/wasm-builder/Cargo.toml
+++ b/utils/wasm-builder/Cargo.toml
@@ -20,4 +20,4 @@ toml = "0.5.4"
 walkdir = "2.3.2"
 wasm-gc-api = "0.1.11"
 ansi_term = "0.12.1"
-sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../primitives/maybe-compressed-blob" }
+sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" }

From 2ab769819e8a3ee869459b78cbb98cc749728cc3 Mon Sep 17 00:00:00 2001
From: sander2 
Date: Thu, 11 Nov 2021 09:23:34 +0100
Subject: [PATCH 078/162] expose substrate-cli service (#10229)

* expose substrate-cli service

* chore: undo change to new_full (since new_full_base is exposed already)
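
A rough sketch of how an embedder (e.g. an integration test) can use the newly
exposed handle; the `RpcSession` construction is an assumption about the
sc-service API of this era:

```rust
// Hedged example: with `rpc_handlers` now part of `NewFullBase`, an embedder
// can drive the node's RPC in-process instead of dialing it over a socket.
use node_cli::service::{new_full_base, NewFullBase};
use sc_service::{config::Configuration, error::Error as ServiceError, RpcSession};

async fn query_system_health(config: Configuration) -> Result<Option<String>, ServiceError> {
	let NewFullBase { task_manager, rpc_handlers, .. } = new_full_base(config, |_, _| ())?;
	// Keep the task manager alive for as long as the node should run.
	let _keep_alive = task_manager;
	// Assumption: `RpcSession::new` takes a futures mpsc sender that is used
	// for subscription notifications.
	let session = RpcSession::new(futures::channel::mpsc::unbounded().0);
	let request = r#"{"jsonrpc":"2.0","id":1,"method":"system_health","params":[]}"#;
	Ok(rpc_handlers.rpc_query(&session, request).await)
}
```
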
---
 bin/node/cli/src/service.rs | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs
index 2220614ebaf2a..a022d501d0155 100644
--- a/bin/node/cli/src/service.rs
+++ b/bin/node/cli/src/service.rs
@@ -30,7 +30,7 @@ use sc_client_api::{BlockBackend, ExecutorProvider};
 use sc_consensus_babe::{self, SlotProportion};
 use sc_executor::NativeElseWasmExecutor;
 use sc_network::{Event, NetworkService};
-use sc_service::{config::Configuration, error::Error as ServiceError, TaskManager};
+use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_api::ProvideRuntimeApi;
 use sp_core::crypto::Pair;
@@ -298,6 +298,8 @@ pub struct NewFullBase {
 	pub network: Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
 	/// The transaction pool of the node.
 	pub transaction_pool: Arc<TransactionPool>,
+	/// The rpc handlers of the node.
+	pub rpc_handlers: RpcHandlers,
 }
 
 /// Creates a full service from the configuration.
@@ -358,7 +360,7 @@ pub fn new_full_base(
 	let enable_grandpa = !config.disable_grandpa;
 	let prometheus_registry = config.prometheus_registry().cloned();
 
-	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+	let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
 		config,
 		backend,
 		client: client.clone(),
@@ -507,7 +509,7 @@ pub fn new_full_base(
 	}
 
 	network_starter.start_network();
-	Ok(NewFullBase { task_manager, client, network, transaction_pool })
+	Ok(NewFullBase { task_manager, client, network, transaction_pool, rpc_handlers })
 }
 
 /// Builds a new service for a full client.

From 7b1c81f5b4966548fc281d3d164e317e338ef2d6 Mon Sep 17 00:00:00 2001
From: David 
Date: Thu, 11 Nov 2021 12:29:28 +0100
Subject: [PATCH 079/162] Upgrade jsonrpsee to v0.4.1 (#10022)

* Upgrade jsonrpsee to v0.4.1

* remove needless BlockT trait bound

* use default wss port in URL

* Fix try_runtime build

* Partially fix for "remote-tests" feature

* Review feedback

* fmt

* Sort out trait bounds for benches

* Fmt

* fmt again?

* fmt with nightly-2021-09-13

* Upgrade try-runtime as well

* fmt
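
For reference, a one-off call in the new style looks roughly like this (URL and
error handling are illustrative; `Client::request` and passing `None` for "no
params" are the jsonrpsee 0.4.1 APIs this patch migrates to):

```rust
// Hedged example: a one-off RPC call with jsonrpsee 0.4.1. Positional params
// come from `rpc_params!` (or `None` when there are none) instead of
// hand-built `JsonRpcParams::Array` vectors.
use jsonrpsee::{types::traits::Client, ws_client::WsClientBuilder};

async fn finalized_head_hex(url: &str) -> Result<String, String> {
	let client = WsClientBuilder::default()
		.build(url)
		.await
		.map_err(|e| format!("ws connect failed: {:?}", e))?;
	// The node returns the finalized head hash as a hex-encoded string.
	client
		.request::<String>("chain_getFinalizedHead", None)
		.await
		.map_err(|e| format!("chain_getFinalizedHead failed: {:?}", e))
}
```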

Co-authored-by: Niklas Adolfsson 
---
 Cargo.lock                                    | 126 +++++++-----------
 bin/node/cli/Cargo.toml                       |   2 +-
 frame/bags-list/remote-tests/src/migration.rs |   4 +-
 .../remote-tests/src/sanity_check.rs          |   4 +-
 frame/bags-list/remote-tests/src/snapshot.rs  |   4 +-
 .../procedural/src/construct_runtime/parse.rs |   2 +-
 utils/frame/remote-externalities/Cargo.toml   |   5 +-
 utils/frame/remote-externalities/src/lib.rs   | 109 ++++++++-------
 .../frame/remote-externalities/src/rpc_api.rs |  27 ++--
 utils/frame/try-runtime/cli/Cargo.toml        |   4 +-
 .../cli/src/commands/follow_chain.rs          |   8 +-
 utils/frame/try-runtime/cli/src/lib.rs        |   7 +-
 12 files changed, 128 insertions(+), 174 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 69cd80e292f88..5ae937ad22d4f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -144,9 +144,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
 
 [[package]]
 name = "arrayvec"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7"
+checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd"
 
 [[package]]
 name = "asn1_der"
@@ -419,19 +419,6 @@ dependencies = [
  "rustc-demangle",
 ]
 
-[[package]]
-name = "bae"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec107f431ee3d8a8e45e6dd117adab769556ef463959e77bf6a4888d5fd500cf"
-dependencies = [
- "heck",
- "proc-macro-error 0.4.12",
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "base-x"
 version = "0.2.8"
@@ -477,9 +464,9 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
 
 [[package]]
 name = "beef"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409"
+checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736"
 dependencies = [
  "serde",
 ]
@@ -1634,7 +1621,7 @@ dependencies = [
  "bitflags",
  "byteorder",
  "lazy_static",
- "proc-macro-error 1.0.4",
+ "proc-macro-error",
  "proc-macro2",
  "quote",
  "syn",
@@ -3140,14 +3127,25 @@ dependencies = [
  "slab",
 ]
 
+[[package]]
+name = "jsonrpsee"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6373a33d987866ccfe1af4bc11b089dce941764313f9fd8b7cf13fcb51b72dc5"
+dependencies = [
+ "jsonrpsee-proc-macros",
+ "jsonrpsee-types",
+ "jsonrpsee-utils",
+ "jsonrpsee-ws-client",
+]
+
 [[package]]
 name = "jsonrpsee-proc-macros"
-version = "0.3.1"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8edb341d35279b59c79d7fe9e060a51aec29d45af99cc7c72ea7caa350fa71a4"
+checksum = "d802063f7a3c867456955f9d2f15eb3ee0edb5ec9ec2b5526324756759221c0f"
 dependencies = [
- "Inflector",
- "bae",
+ "log 0.4.14",
  "proc-macro-crate 1.0.0",
  "proc-macro2",
  "quote",
@@ -3156,10 +3154,11 @@ dependencies = [
 
 [[package]]
 name = "jsonrpsee-types"
-version = "0.3.1"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cc738fd55b676ada3271ef7c383a14a0867a2a88b0fa941311bf5fc0a29d498"
+checksum = "62f778cf245158fbd8f5d50823a2e9e4c708a40be164766bd35e9fb1d86715b2"
 dependencies = [
+ "anyhow",
  "async-trait",
  "beef",
  "futures-channel",
@@ -3168,32 +3167,43 @@ dependencies = [
  "log 0.4.14",
  "serde",
  "serde_json",
- "soketto 0.6.0",
+ "soketto 0.7.0",
  "thiserror",
 ]
 
+[[package]]
+name = "jsonrpsee-utils"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0109c4f972058f3b1925b73a17210aff7b63b65967264d0045d15ee88fe84f0c"
+dependencies = [
+ "arrayvec 0.7.1",
+ "beef",
+ "jsonrpsee-types",
+]
+
 [[package]]
 name = "jsonrpsee-ws-client"
-version = "0.3.1"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9841352dbecf4c2ed5dc71698df9f1660262ae4e0b610e968602529bdbcf7b30"
+checksum = "559aa56fc402af206c00fc913dc2be1d9d788dcde045d14df141a535245d35ef"
 dependencies = [
+ "arrayvec 0.7.1",
  "async-trait",
  "fnv",
  "futures 0.3.16",
+ "http",
  "jsonrpsee-types",
  "log 0.4.14",
  "pin-project 1.0.8",
- "rustls",
  "rustls-native-certs",
  "serde",
  "serde_json",
- "soketto 0.6.0",
+ "soketto 0.7.0",
  "thiserror",
  "tokio",
  "tokio-rustls",
  "tokio-util",
- "url 2.2.1",
 ]
 
 [[package]]
@@ -4353,7 +4363,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99"
 dependencies = [
  "proc-macro-crate 1.0.0",
- "proc-macro-error 1.0.4",
+ "proc-macro-error",
  "proc-macro2",
  "quote",
  "syn",
@@ -6249,7 +6259,7 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909"
 dependencies = [
- "arrayvec 0.7.0",
+ "arrayvec 0.7.1",
  "bitvec 0.20.2",
  "byte-slice-cast",
  "impl-trait-for-tuples",
@@ -6742,45 +6752,19 @@ dependencies = [
  "toml",
 ]
 
-[[package]]
-name = "proc-macro-error"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7"
-dependencies = [
- "proc-macro-error-attr 0.4.12",
- "proc-macro2",
- "quote",
- "syn",
- "version_check 0.9.2",
-]
-
 [[package]]
 name = "proc-macro-error"
 version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
 dependencies = [
- "proc-macro-error-attr 1.0.4",
+ "proc-macro-error-attr",
  "proc-macro2",
  "quote",
  "syn",
  "version_check 0.9.2",
 ]
 
-[[package]]
-name = "proc-macro-error-attr"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "syn-mid",
- "version_check 0.9.2",
-]
-
 [[package]]
 name = "proc-macro-error-attr"
 version = "1.0.4"
@@ -7365,8 +7349,7 @@ version = "0.10.0-dev"
 dependencies = [
  "env_logger 0.9.0",
  "frame-support",
- "jsonrpsee-proc-macros",
- "jsonrpsee-ws-client",
+ "jsonrpsee",
  "log 0.4.14",
  "pallet-elections-phragmen",
  "parity-scale-codec",
@@ -9157,9 +9140,9 @@ dependencies = [
 
 [[package]]
 name = "soketto"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f"
+checksum = "083624472e8817d44d02c0e55df043737ff11f279af924abdf93845717c2b75c"
 dependencies = [
  "base64 0.13.0",
  "bytes 1.0.1",
@@ -10062,7 +10045,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba"
 dependencies = [
  "heck",
- "proc-macro-error 1.0.4",
+ "proc-macro-error",
  "proc-macro2",
  "quote",
  "syn",
@@ -10355,17 +10338,6 @@ dependencies = [
  "unicode-xid",
 ]
 
-[[package]]
-name = "syn-mid"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baa8e7560a164edb1621a55d18a0c59abf49d360f47aa7b821061dd7eea7fac9"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
 [[package]]
 name = "synstructure"
 version = "0.12.4"
@@ -10974,7 +10946,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
 name = "try-runtime-cli"
 version = "0.10.0-dev"
 dependencies = [
- "jsonrpsee-ws-client",
+ "jsonrpsee",
  "log 0.4.14",
  "parity-scale-codec",
  "remote-externalities",
@@ -11447,7 +11419,7 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d8b86dcd2c3efdb8390728a2b56f762db07789aaa5aa872a9dc776ba3a7912ed"
 dependencies = [
- "proc-macro-error 1.0.4",
+ "proc-macro-error",
  "proc-macro2",
  "quote",
  "syn",
diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml
index 15760c5a9abb4..fc39e47ce4113 100644
--- a/bin/node/cli/Cargo.toml
+++ b/bin/node/cli/Cargo.toml
@@ -130,7 +130,7 @@ async-std = { version = "1.10.0", features = ["attributes"] }
 soketto = "0.4.2"
 criterion = { version = "0.3.5", features = [ "async_tokio" ] }
 tokio = { version = "1.10", features = ["macros", "time"] }
-jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = ["tokio1"] }
+jsonrpsee-ws-client = "0.4.1"
 wait-timeout = "0.2"
 remote-externalities = { path = "../../../utils/frame/remote-externalities" }
 pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" }
diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs
index 1e977011f1439..86595c7feba9d 100644
--- a/frame/bags-list/remote-tests/src/migration.rs
+++ b/frame/bags-list/remote-tests/src/migration.rs
@@ -21,11 +21,11 @@ use frame_election_provider_support::SortedListProvider;
 use frame_support::traits::PalletInfoAccess;
 use pallet_staking::Nominators;
 use remote_externalities::{Builder, Mode, OnlineConfig};
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::{traits::Block as BlockT, DeserializeOwned};
 
 /// Test voter bags migration. `currency_unit` is the number of planks per the runtime's `UNITS`
 /// (i.e. number of decimal places per DOT, KSM etc)
-pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT>(
+pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT + DeserializeOwned>(
 	currency_unit: u64,
 	currency_name: &'static str,
 	ws_url: String,
diff --git a/frame/bags-list/remote-tests/src/sanity_check.rs b/frame/bags-list/remote-tests/src/sanity_check.rs
index 7282e7bad5e32..adab1ae5477ea 100644
--- a/frame/bags-list/remote-tests/src/sanity_check.rs
+++ b/frame/bags-list/remote-tests/src/sanity_check.rs
@@ -22,11 +22,11 @@ use frame_support::{
 	traits::{Get, PalletInfoAccess},
 };
 use remote_externalities::{Builder, Mode, OnlineConfig};
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::{traits::Block as BlockT, DeserializeOwned};
 use sp_std::prelude::*;
 
 /// Execute the sanity check of the bags-list.
-pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT>(
+pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT + DeserializeOwned>(
 	currency_unit: u64,
 	currency_name: &'static str,
 	ws_url: String,
diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs
index 6e186a65cb2b9..0e68a4495edfc 100644
--- a/frame/bags-list/remote-tests/src/snapshot.rs
+++ b/frame/bags-list/remote-tests/src/snapshot.rs
@@ -18,10 +18,10 @@
 
 use frame_support::traits::PalletInfoAccess;
 use remote_externalities::{Builder, Mode, OnlineConfig};
-use sp_runtime::traits::Block as BlockT;
+use sp_runtime::{traits::Block as BlockT, DeserializeOwned};
 
 /// Execute the creation of a snapshot from pallet-staking.
-pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT>(
+pub async fn execute<Runtime: crate::RuntimeT, Block: BlockT + DeserializeOwned>(
 	voter_limit: Option<usize>,
 	currency_unit: u64,
 	ws_url: String,
diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs
index f80b7b1ac554c..44db2c10fff6c 100644
--- a/frame/support/procedural/src/construct_runtime/parse.rs
+++ b/frame/support/procedural/src/construct_runtime/parse.rs
@@ -225,7 +225,7 @@ impl Parse for PalletDeclaration {
 		{
 			return Err(input.error(
 				"Unexpected tokens, expected one of `::$ident` `::{`, `exclude_parts`, `use_parts`, `=`, `,`",
-			))
+			));
 		} else {
 			None
 		};
diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml
index 9c9025d934aab..f2482f9c423db 100644
--- a/utils/frame/remote-externalities/Cargo.toml
+++ b/utils/frame/remote-externalities/Cargo.toml
@@ -13,10 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = [
-    "tokio1",
-]}
-jsonrpsee-proc-macros = "0.3.1"
+jsonrpsee = { version = "0.4.1", features = ["ws-client", "macros"] }
 
 env_logger = "0.9"
 log = "0.4.11"
diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs
index bf8c57ae14ee6..da715be6b4be4 100644
--- a/utils/frame/remote-externalities/src/lib.rs
+++ b/utils/frame/remote-externalities/src/lib.rs
@@ -21,7 +21,16 @@
 //! based chain, or a local state snapshot file.
 
 use codec::{Decode, Encode};
-use jsonrpsee_ws_client::{types::v2::params::JsonRpcParams, WsClient, WsClientBuilder};
+
+use jsonrpsee::{
+	proc_macros::rpc,
+	rpc_params,
+	types::{traits::Client, Error as RpcError},
+	ws_client::{WsClient, WsClientBuilder},
+};
+
+use log::*;
+use serde::de::DeserializeOwned;
 use sp_core::{
 	hashing::twox_128,
 	hexdisplay::HexDisplay,
@@ -39,23 +48,25 @@ pub mod rpc_api;
 type KeyPair = (StorageKey, StorageData);
 
 const LOG_TARGET: &str = "remote-ext";
-const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io";
+const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443";
 const BATCH_SIZE: usize = 1000;
 
-jsonrpsee_proc_macros::rpc_client_api! {
-	RpcApi<B: BlockT> {
-		#[rpc(method = "state_getStorage", positional_params)]
-		fn get_storage(prefix: StorageKey, hash: Option<B::Hash>) -> StorageData;
-		#[rpc(method = "state_getKeysPaged", positional_params)]
-		fn get_keys_paged(
-			prefix: Option<StorageKey>,
-			count: u32,
-			start_key: Option<StorageKey>,
-			hash: Option<B::Hash>,
-		) -> Vec<StorageKey>;
-		#[rpc(method = "chain_getFinalizedHead", positional_params)]
-		fn finalized_head() -> B::Hash;
-	}
+#[rpc(client)]
+pub trait RpcApi<Hash> {
+	#[method(name = "state_getStorage")]
+	fn get_storage(&self, prefix: StorageKey, hash: Option<Hash>) -> Result<StorageData, RpcError>;
+
+	#[method(name = "state_getKeysPaged")]
+	fn get_keys_paged(
+		&self,
+		prefix: Option<StorageKey>,
+		count: u32,
+		start_key: Option<StorageKey>,
+		hash: Option<Hash>,
+	) -> Result<Vec<StorageKey>, RpcError>;
+
+	#[method(name = "chain_getFinalizedHead")]
+	fn finalized_head(&self) -> Result<Hash, RpcError>;
 }
 
 /// The execution mode.
@@ -183,7 +194,7 @@ pub struct Builder<B: BlockT> {
 
 // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for
 // that.
-impl<B: BlockT> Default for Builder<B> {
+impl<B: BlockT + DeserializeOwned> Default for Builder<B> {
 	fn default() -> Self {
 		Self {
 			mode: Default::default(),
@@ -196,7 +207,7 @@ impl<B: BlockT> Default for Builder<B> {
 }
 
 // Mode methods
-impl<B: BlockT> Builder<B> {
+impl<B: BlockT + DeserializeOwned> Builder<B> {
 	fn as_online(&self) -> &OnlineConfig<B> {
 		match &self.mode {
 			Mode::Online(config) => &config,
@@ -215,25 +226,23 @@ impl<B: BlockT> Builder<B> {
 }
 
 // RPC methods
-impl<B: BlockT> Builder<B> {
+impl<B: BlockT + DeserializeOwned> Builder<B> {
 	async fn rpc_get_storage(
 		&self,
 		key: StorageKey,
 		maybe_at: Option<B::Hash>,
 	) -> Result<StorageData, &'static str> {
-		log::trace!(target: LOG_TARGET, "rpc: get_storage");
-		RpcApi::<B>::get_storage(self.as_online().rpc_client(), key, maybe_at)
-			.await
-			.map_err(|e| {
-				log::error!(target: LOG_TARGET, "Error = {:?}", e);
-				"rpc get_storage failed."
-			})
+		trace!(target: LOG_TARGET, "rpc: get_storage");
+		self.as_online().rpc_client().get_storage(key, maybe_at).await.map_err(|e| {
+			error!("Error = {:?}", e);
+			"rpc get_storage failed."
+		})
 	}
 	/// Get the latest finalized head.
 	async fn rpc_get_head(&self) -> Result<B::Hash, &'static str> {
-		log::trace!(target: LOG_TARGET, "rpc: finalized_head");
-		RpcApi::<B>::finalized_head(self.as_online().rpc_client()).await.map_err(|e| {
-			log::error!(target: LOG_TARGET, "Error = {:?}", e);
+		trace!(target: LOG_TARGET, "rpc: finalized_head");
+		self.as_online().rpc_client().finalized_head().await.map_err(|e| {
+			error!("Error = {:?}", e);
 			"rpc finalized_head failed."
 		})
 	}
@@ -248,18 +257,15 @@ impl Builder {
 		let mut last_key: Option<StorageKey> = None;
 		let mut all_keys: Vec<StorageKey> = vec![];
 		let keys = loop {
-			let page = RpcApi::<B>::get_keys_paged(
-				self.as_online().rpc_client(),
-				Some(prefix.clone()),
-				PAGE,
-				last_key.clone(),
-				Some(at),
-			)
-			.await
-			.map_err(|e| {
-				log::error!(target: LOG_TARGET, "Error = {:?}", e);
-				"rpc get_keys failed"
-			})?;
+			let page = self
+				.as_online()
+				.rpc_client()
+				.get_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at))
+				.await
+				.map_err(|e| {
+					error!(target: LOG_TARGET, "Error = {:?}", e);
+					"rpc get_keys failed"
+				})?;
 			let page_len = page.len();
 			all_keys.extend(page);
 
@@ -291,8 +297,6 @@ impl<B: BlockT> Builder<B> {
 		prefix: StorageKey,
 		at: B::Hash,
 	) -> Result<Vec<KeyPair>, &'static str> {
-		use jsonrpsee_ws_client::types::traits::Client;
-		use serde_json::to_value;
 		let keys = self.get_keys_paged(prefix, at).await?;
 		let keys_count = keys.len();
 		log::debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len());
@@ -303,15 +307,7 @@ impl<B: BlockT> Builder<B> {
 			let batch = chunk_keys
 				.iter()
 				.cloned()
-				.map(|key| {
-					(
-						"state_getStorage",
-						JsonRpcParams::Array(vec![
-							to_value(key).expect("json serialization will work; qed."),
-							to_value(at).expect("json serialization will work; qed."),
-						]),
-					)
-				})
+				.map(|key| ("state_getStorage", rpc_params![key, at]))
+				.collect::<Vec<_>>();
 			let values = client.batch_request::<Option<StorageData>>(batch).await.map_err(|e| {
 				log::error!(
@@ -348,7 +344,7 @@ impl<B: BlockT> Builder<B> {
 }
 
 // Internal methods
-impl<B: BlockT> Builder<B> {
+impl<B: BlockT + DeserializeOwned> Builder<B> {
 	/// Save the given data as state snapshot.
 	fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> {
 		log::debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path);
@@ -489,7 +485,7 @@ impl<B: BlockT> Builder<B> {
 }
 
 // Public methods
-impl<B: BlockT> Builder<B> {
+impl<B: BlockT + DeserializeOwned> Builder<B> {
 	/// Create a new builder.
 	pub fn new() -> Self {
 		Default::default()
@@ -625,7 +621,7 @@ mod tests {
 #[cfg(all(test, feature = "remote-test"))]
 mod remote_tests {
 	use super::test_prelude::*;
-
+	use pallet_elections_phragmen::Members;
 	const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. Is it running?";
 
 	#[tokio::test]
@@ -697,7 +693,6 @@ mod remote_tests {
 
 	#[tokio::test]
 	async fn sanity_check_decoding() {
-		use pallet_elections_phragmen::SeatHolder;
 		use sp_core::crypto::Ss58Codec;
 
 		type AccountId = sp_runtime::AccountId32;
@@ -722,7 +717,7 @@ mod remote_tests {
 				let gav_polkadot =
 					AccountId::from_ss58check("13RDY9nrJpyTDBSUdBw12dGwhk19sGwsrVZ2bxkzYHBSagP2")
 						.unwrap();
-				let members = Members::get().unwrap();
+				let members = Members::get();
 				assert!(members
 					.iter()
 					.map(|s| s.who.clone())
diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs
index 24050856a96a1..024cbad3ca551 100644
--- a/utils/frame/remote-externalities/src/rpc_api.rs
+++ b/utils/frame/remote-externalities/src/rpc_api.rs
@@ -18,9 +18,10 @@
 //! WS RPC API for one off RPC calls to a substrate node.
 // TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988
 
-use jsonrpsee_ws_client::{
-	types::{traits::Client, v2::params::JsonRpcParams},
-	WsClient, WsClientBuilder,
+use jsonrpsee::{
+	rpc_params,
+	types::traits::Client,
+	ws_client::{WsClient, WsClientBuilder},
 };
 use sp_runtime::{
 	generic::SignedBlock,
@@ -34,11 +35,10 @@ where
 	Block::Header: serde::de::DeserializeOwned,
 	S: AsRef<str>,
 {
-	let params = vec![hash_to_json::<Block>(at)?];
 	let client = build_client(from).await?;
 
 	client
+		.request::<Block::Header>("chain_getHeader", rpc_params!(at))
+		.request::("chain_getHeader", rpc_params!(at))
 		.await
 		.map_err(|e| format!("chain_getHeader request failed: {:?}", e))
 }
@@ -52,7 +52,7 @@ where
 	let client = build_client(from).await?;
 
 	client
-		.request::<Block::Hash>("chain_getFinalizedHead", JsonRpcParams::NoParams)
+		.request::<Block::Hash>("chain_getFinalizedHead", None)
 		.await
 		.map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e))
 }
@@ -64,22 +64,15 @@ where
 	Block: BlockT + serde::de::DeserializeOwned,
 	Block::Header: HeaderT,
 {
-	let params = vec![hash_to_json::<Block>(at)?];
 	let client = build_client(from).await?;
 	let signed_block = client
-		.request::<SignedBlock<Block>>("chain_getBlock", JsonRpcParams::Array(params))
+		.request::<SignedBlock<Block>>("chain_getBlock", rpc_params!(at))
 		.await
 		.map_err(|e| format!("chain_getBlock request failed: {:?}", e))?;
 
 	Ok(signed_block.block)
 }
 
-/// Convert a block hash to a serde json value.
-fn hash_to_json<Block: BlockT>(hash: Block::Hash) -> Result<serde_json::Value, String> {
-	serde_json::to_value(hash)
-		.map_err(|e| format!("Block hash could not be converted to JSON: {:?}", e))
-}
-
 /// Build a WebSocket client that connects to `from`.
 async fn build_client<S: AsRef<str>>(from: S) -> Result<WsClient, String> {
 	WsClientBuilder::default()
@@ -99,13 +92,9 @@ where
 	Block: BlockT + serde::de::DeserializeOwned,
 	Block::Header: HeaderT,
 {
-	let params = if let Some(at) = at { vec![hash_to_json::<Block>(at)?] } else { vec![] };
 	let client = build_client(from).await?;
 	client
-		.request::<sp_version::RuntimeVersion>(
-			"state_getRuntimeVersion",
-			JsonRpcParams::Array(params),
-		)
+		.request::<sp_version::RuntimeVersion>("state_getRuntimeVersion", rpc_params!(at))
 		.await
 		.map_err(|e| format!("state_getRuntimeVersion request failed: {:?}", e))
 }
diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml
index 154c522c1dfd0..a89a625bbd9ed 100644
--- a/utils/frame/try-runtime/cli/Cargo.toml
+++ b/utils/frame/try-runtime/cli/Cargo.toml
@@ -31,6 +31,4 @@ sp-externalities = { version = "0.10.0-dev", path = "../../../../primitives/exte
 sp-version = { version = "4.0.0-dev", path = "../../../../primitives/version" }
 
 remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" }
-jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = [
-    "tokio1",
-]}
+jsonrpsee = { version = "0.4.1", default-features = false, features = ["ws-client"]}
diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
index 9125db13c78f9..27fb35dd7a46a 100644
--- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
+++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
@@ -19,9 +19,9 @@ use crate::{
 	build_executor, ensure_matching_spec, extract_code, full_extensions, local_spec, parse,
 	state_machine_call, SharedParams, LOG_TARGET,
 };
-use jsonrpsee_ws_client::{
-	types::{traits::SubscriptionClient, v2::params::JsonRpcParams, Subscription},
-	WsClientBuilder,
+use jsonrpsee::{
+	types::{traits::SubscriptionClient, Subscription},
+	ws_client::WsClientBuilder,
 };
 use parity_scale_codec::Decode;
 use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig};
@@ -72,7 +72,7 @@ where
 
 	log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB);
 	let mut subscription: Subscription<Block::Header> =
-		client.subscribe(&SUB, JsonRpcParams::NoParams, &UN_SUB).await.unwrap();
+		client.subscribe(&SUB, None, &UN_SUB).await.unwrap();
 
 	let (code_key, code) = extract_code(&config.chain_spec)?;
 	let executor = build_executor::<ExecDispatch>(&shared, &config);
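
For reference, the jsonrpsee 0.4 subscription flow used above looks like this in isolation (a minimal sketch, not part of the patch: the endpoint is a placeholder and `serde_json::Value` stands in for the decoded header type):

use jsonrpsee::{
	types::{traits::SubscriptionClient, Subscription},
	ws_client::WsClientBuilder,
};

// Connect, subscribe to finalized heads, and drain notifications. `None`
// replaces the old `JsonRpcParams::NoParams` for parameter-less subscriptions.
async fn watch_finalized_heads() -> Result<(), Box<dyn std::error::Error>> {
	let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
	let mut sub: Subscription<serde_json::Value> = client
		.subscribe("chain_subscribeFinalizedHeads", None, "chain_unsubscribeFinalizedHeads")
		.await?;
	while let Some(header) = sub.next().await {
		println!("new finalized head: {:?}", header);
	}
	Ok(())
}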
diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs
index 28e51b38f2ace..a74625492bb1c 100644
--- a/utils/frame/try-runtime/cli/src/lib.rs
+++ b/utils/frame/try-runtime/cli/src/lib.rs
@@ -281,7 +281,10 @@ use sp_core::{
 };
 use sp_externalities::Extensions;
 use sp_keystore::{testing::KeyStore, KeystoreExt};
-use sp_runtime::traits::{Block as BlockT, NumberFor};
+use sp_runtime::{
+	traits::{Block as BlockT, NumberFor},
+	DeserializeOwned,
+};
 use sp_state_machine::{OverlayedChanges, StateMachine};
 use std::{fmt::Debug, path::PathBuf, str::FromStr};
 
@@ -464,7 +467,7 @@ pub enum State {
 
 impl State {
 	/// Create the [`remote_externalities::Builder`] from self.
-	pub(crate) fn builder<Block: BlockT>(&self) -> sc_cli::Result<Builder<Block>>
+	pub(crate) fn builder<Block: BlockT + DeserializeOwned>(&self) -> sc_cli::Result<Builder<Block>>
 	where
 		Block::Hash: FromStr,
 		<Block::Hash as FromStr>::Err: Debug,

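Taken together, the call pattern this patch migrates to is: build a `WsClient` once, then issue typed one-off requests through `Client::request`, passing positional parameters with `rpc_params!` instead of hand-rolling `JsonRpcParams::Array` values. A minimal sketch of the new style (endpoint, placeholder return types, and error handling are illustrative only):

use jsonrpsee::{rpc_params, types::traits::Client, ws_client::WsClientBuilder};

// Fetch the finalized head hash, then its header, with the jsonrpsee 0.4 API.
async fn fetch_finalized_header() -> Result<(), Box<dyn std::error::Error>> {
	let client = WsClientBuilder::default().build("wss://rpc.polkadot.io:443").await?;

	// Parameter-less call: `None` where the old API used `JsonRpcParams::NoParams`.
	let head: String = client.request("chain_getFinalizedHead", None).await?;

	// Positional parameters via `rpc_params!`, replacing the manual
	// `serde_json::to_value` + `JsonRpcParams::Array` construction.
	let header: serde_json::Value = client.request("chain_getHeader", rpc_params![head]).await?;
	println!("{}", header);
	Ok(())
}

Batch calls keep the same shape: a `Vec` of `(method, rpc_params![..])` pairs handed to `batch_request`, as in the `state_getStorage` batch above.
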
From 8d7898923c8287df6f2014c31940d7b5a2df9323 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Nov 2021 13:56:46 +0000
Subject: [PATCH 080/162] Bump substrate-bip39 from 0.4.2 to 0.4.4 (#10213)

Bumps [substrate-bip39](https://github.com/paritytech/substrate-bip39) from 0.4.2 to 0.4.4.
- [Release notes](https://github.com/paritytech/substrate-bip39/releases)
- [Commits](https://github.com/paritytech/substrate-bip39/commits)

---
updated-dependencies:
- dependency-name: substrate-bip39
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock                 | 73 +++++++++++++++++---------------------
 primitives/core/Cargo.toml |  2 +-
 2 files changed, 34 insertions(+), 41 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5ae937ad22d4f..51eb96d6989f9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -59,7 +59,7 @@ dependencies = [
  "cipher",
  "ctr",
  "ghash",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
@@ -1309,22 +1309,22 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
 
 [[package]]
 name = "crypto-mac"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
+checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
 dependencies = [
- "generic-array 0.12.4",
- "subtle 1.0.0",
+ "generic-array 0.14.4",
+ "subtle",
 ]
 
 [[package]]
 name = "crypto-mac"
-version = "0.8.0"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
+checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
 dependencies = [
  "generic-array 0.14.4",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
@@ -1397,7 +1397,7 @@ dependencies = [
  "byteorder",
  "digest 0.8.1",
  "rand_core 0.5.1",
- "subtle 2.4.0",
+ "subtle",
  "zeroize",
 ]
 
@@ -1410,7 +1410,7 @@ dependencies = [
  "byteorder",
  "digest 0.9.0",
  "rand_core 0.5.1",
- "subtle 2.4.0",
+ "subtle",
  "zeroize",
 ]
 
@@ -2610,21 +2610,21 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f"
 
 [[package]]
 name = "hmac"
-version = "0.7.1"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695"
+checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840"
 dependencies = [
- "crypto-mac 0.7.0",
- "digest 0.8.1",
+ "crypto-mac 0.8.0",
+ "digest 0.9.0",
 ]
 
 [[package]]
 name = "hmac"
-version = "0.8.1"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840"
+checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
 dependencies = [
- "crypto-mac 0.8.0",
+ "crypto-mac 0.11.1",
  "digest 0.9.0",
 ]
 
@@ -3868,7 +3868,7 @@ checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80"
 dependencies = [
  "crunchy",
  "digest 0.9.0",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
@@ -3879,7 +3879,7 @@ checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451"
 dependencies = [
  "crunchy",
  "digest 0.9.0",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
@@ -6443,21 +6443,20 @@ dependencies = [
 
 [[package]]
 name = "pbkdf2"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9"
+checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd"
 dependencies = [
- "byteorder",
- "crypto-mac 0.7.0",
+ "crypto-mac 0.8.0",
 ]
 
 [[package]]
 name = "pbkdf2"
-version = "0.4.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd"
+checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa"
 dependencies = [
- "crypto-mac 0.8.0",
+ "crypto-mac 0.11.1",
 ]
 
 [[package]]
@@ -8779,7 +8778,7 @@ dependencies = [
  "rand 0.7.3",
  "rand_core 0.5.1",
  "sha2 0.8.2",
- "subtle 2.4.0",
+ "subtle",
  "zeroize",
 ]
 
@@ -9097,7 +9096,7 @@ dependencies = [
  "ring",
  "rustc_version 0.3.3",
  "sha2 0.9.8",
- "subtle 2.4.0",
+ "subtle",
  "x25519-dalek",
 ]
 
@@ -10082,14 +10081,14 @@ dependencies = [
 
 [[package]]
 name = "substrate-bip39"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236"
+checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c"
 dependencies = [
- "hmac 0.7.1",
- "pbkdf2 0.3.0",
+ "hmac 0.11.0",
+ "pbkdf2 0.8.0",
  "schnorrkel",
- "sha2 0.8.2",
+ "sha2 0.9.8",
  "zeroize",
 ]
 
@@ -10315,12 +10314,6 @@ dependencies = [
  "wasm-gc-api",
 ]
 
-[[package]]
-name = "subtle"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
-
 [[package]]
 name = "subtle"
 version = "2.4.0"
@@ -11088,7 +11081,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402"
 dependencies = [
  "generic-array 0.14.4",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml
index 9e31ba644c250..26acd44c7cbdc 100644
--- a/primitives/core/Cargo.toml
+++ b/primitives/core/Cargo.toml
@@ -32,7 +32,7 @@ hash-db = { version = "0.15.2", default-features = false }
 hash256-std-hasher = { version = "0.15.2", default-features = false }
 base58 = { version = "0.2.0", optional = true }
 rand = { version = "0.7.3", optional = true, features = ["small_rng"] }
-substrate-bip39 = { version = "0.4.2", optional = true }
+substrate-bip39 = { version = "0.4.4", optional = true }
 tiny-bip39 = { version = "0.8.2", optional = true }
 regex = { version = "1.5.4", optional = true }
 num-traits = { version = "0.2.8", default-features = false }

From 083d0ce1386e4b9d87692ce0d30199073ffd02fd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Nov 2021 12:02:39 -0300
Subject: [PATCH 081/162] Bump parity-util-mem from 0.10.0 to 0.10.2 (#10236)

Bumps [parity-util-mem](https://github.com/paritytech/parity-common) from 0.10.0 to 0.10.2.
- [Release notes](https://github.com/paritytech/parity-common/releases)
- [Commits](https://github.com/paritytech/parity-common/compare/parity-util-mem-v0.10.0...parity-util-mem-v0.10.2)

---
updated-dependencies:
- dependency-name: parity-util-mem
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock                            | 4 ++--
 bin/node/bench/Cargo.toml             | 2 +-
 client/informant/Cargo.toml           | 2 +-
 client/service/Cargo.toml             | 2 +-
 client/state-db/Cargo.toml            | 2 +-
 client/transaction-pool/Cargo.toml    | 2 +-
 frame/support/Cargo.toml              | 2 +-
 primitives/core/Cargo.toml            | 2 +-
 primitives/runtime/Cargo.toml         | 2 +-
 primitives/test-primitives/Cargo.toml | 2 +-
 test-utils/runtime/Cargo.toml         | 2 +-
 11 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 51eb96d6989f9..8ad1560b85aa1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6301,9 +6301,9 @@ dependencies = [
 
 [[package]]
 name = "parity-util-mem"
-version = "0.10.0"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ad6f1acec69b95caf435bbd158d486e5a0a44fcf51531e84922c59ff09e8457"
+checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770"
 dependencies = [
  "cfg-if 1.0.0",
  "hashbrown 0.11.2",
diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml
index f59ea2361186c..66a14a123ee56 100644
--- a/bin/node/bench/Cargo.toml
+++ b/bin/node/bench/Cargo.toml
@@ -35,7 +35,7 @@ fs_extra = "1"
 hex = "0.4.0"
 rand = { version = "0.7.2", features = ["small_rng"] }
 lazy_static = "1.4.0"
-parity-util-mem = { version = "0.10.0", default-features = false, features = [
+parity-util-mem = { version = "0.10.2", default-features = false, features = [
     "primitive-types",
 ] }
 parity-db = { version = "0.3" }
diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml
index 7d92e14e5d471..5eba3ecaeb2ac 100644
--- a/client/informant/Cargo.toml
+++ b/client/informant/Cargo.toml
@@ -17,7 +17,7 @@ ansi_term = "0.12.1"
 futures = "0.3.9"
 futures-timer = "3.0.1"
 log = "0.4.8"
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sc-network = { version = "0.10.0-dev", path = "../network" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml
index 97b0f4e461c87..c090495fe04ec 100644
--- a/client/service/Cargo.toml
+++ b/client/service/Cargo.toml
@@ -74,7 +74,7 @@ sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
 sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" }
 tracing = "0.1.29"
 tracing-futures = { version = "0.2.4" }
-parity-util-mem = { version = "0.10.0", default-features = false, features = [
+parity-util-mem = { version = "0.10.2", default-features = false, features = [
 	"primitive-types",
 ] }
 async-trait = "0.1.50"
diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml
index abd378c6fff38..cd14038cb77b5 100644
--- a/client/state-db/Cargo.toml
+++ b/client/state-db/Cargo.toml
@@ -18,5 +18,5 @@ log = "0.4.11"
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
 sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
 codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] }
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 parity-util-mem-derive = "0.1.0"
diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml
index 99f1f3788ec0e..d26969cf1e092 100644
--- a/client/transaction-pool/Cargo.toml
+++ b/client/transaction-pool/Cargo.toml
@@ -18,7 +18,7 @@ thiserror = "1.0.21"
 futures = "0.3.16"
 intervalier = "0.4.0"
 log = "0.4.8"
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 parking_lot = "0.11.1"
 prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"}
 sc-client-api = { version = "4.0.0-dev", path = "../api" }
diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml
index 42981dc160a4b..1f48dadc2987d 100644
--- a/frame/support/Cargo.toml
+++ b/frame/support/Cargo.toml
@@ -40,7 +40,7 @@ sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../../primitives/c
 assert_matches = "1.3.0"
 pretty_assertions = "1.0.0"
 frame-system = { version = "4.0.0-dev", path = "../system" }
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 
 [features]
 default = ["std"]
diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml
index 26acd44c7cbdc..48749904763fd 100644
--- a/primitives/core/Cargo.toml
+++ b/primitives/core/Cargo.toml
@@ -43,7 +43,7 @@ parking_lot = { version = "0.11.1", optional = true }
 sp-debug-derive = { version = "4.0.0-dev", default-features = false, path = "../debug-derive" }
 sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" }
 sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" }
-parity-util-mem = { version = "0.10.0", default-features = false, features = [
+parity-util-mem = { version = "0.10.2", default-features = false, features = [
 	"primitive-types",
 ] }
 futures = { version = "0.3.1", optional = true }
diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml
index 9d2957e7aed1a..8e466bf4a1e85 100644
--- a/primitives/runtime/Cargo.toml
+++ b/primitives/runtime/Cargo.toml
@@ -27,7 +27,7 @@ log = { version = "0.4.14", default-features = false }
 paste = "1.0"
 rand = { version = "0.7.2", optional = true }
 impl-trait-for-tuples = "0.2.1"
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 hash256-std-hasher = { version = "0.15.2", default-features = false }
 either = { version = "1.5", default-features = false }
 
diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml
index 4d184c7d02e7f..b95c88626996b 100644
--- a/primitives/test-primitives/Cargo.toml
+++ b/primitives/test-primitives/Cargo.toml
@@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features =
 sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" }
 serde = { version = "1.0.126", optional = true, features = ["derive"] }
 sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" }
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 
 [features]
 default = [
diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml
index f02e079046336..d0d85029738ea 100644
--- a/test-utils/runtime/Cargo.toml
+++ b/test-utils/runtime/Cargo.toml
@@ -40,7 +40,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path =
 sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../primitives/trie" }
 sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" }
 trie-db = { version = "0.22.6", default-features = false }
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
+parity-util-mem = { version = "0.10.2", default-features = false, features = ["primitive-types"] }
 sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" }
 sp-state-machine = { version = "0.10.0-dev", default-features = false, path = "../../primitives/state-machine" }
 sp-externalities = { version = "0.10.0-dev", default-features = false, path = "../../primitives/externalities" }

From c2afe155d4239480795e4d27f9ebb75f1a584ecc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Nov 2021 16:58:14 +0100
Subject: [PATCH 082/162] Bump proc-macro-crate from 1.0.0 to 1.1.0 (#10237)

Bumps [proc-macro-crate](https://github.com/bkchr/proc-macro-crate) from 1.0.0 to 1.1.0.
- [Release notes](https://github.com/bkchr/proc-macro-crate/releases)
- [Commits](https://github.com/bkchr/proc-macro-crate/commits)

---
updated-dependencies:
- dependency-name: proc-macro-crate
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] 

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock                                    | 28 +++++++++----------
 client/chain-spec/derive/Cargo.toml           |  2 +-
 client/tracing/proc-macro/Cargo.toml          |  2 +-
 frame/staking/reward-curve/Cargo.toml         |  2 +-
 frame/support/procedural/tools/Cargo.toml     |  2 +-
 primitives/api/proc-macro/Cargo.toml          |  2 +-
 .../npos-elections/solution-type/Cargo.toml   |  2 +-
 .../runtime-interface/proc-macro/Cargo.toml   |  2 +-
 test-utils/derive/Cargo.toml                  |  2 +-
 9 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 8ad1560b85aa1..423ec1de4e7de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2071,7 +2071,7 @@ name = "frame-support-procedural-tools"
 version = "4.0.0-dev"
 dependencies = [
  "frame-support-procedural-tools-derive",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -3146,7 +3146,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d802063f7a3c867456955f9d2f15eb3ee0edb5ec9ec2b5526324756759221c0f"
 dependencies = [
  "log 0.4.14",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -4362,7 +4362,7 @@ version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro-error",
  "proc-macro2",
  "quote",
@@ -6022,7 +6022,7 @@ dependencies = [
 name = "pallet-staking-reward-curve"
 version = "4.0.0-dev"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "sp-runtime",
@@ -6273,7 +6273,7 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -6743,9 +6743,9 @@ dependencies = [
 
 [[package]]
 name = "proc-macro-crate"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92"
+checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83"
 dependencies = [
  "thiserror",
  "toml",
@@ -7664,7 +7664,7 @@ dependencies = [
 name = "sc-chain-spec-derive"
 version = "4.0.0-dev"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -8665,7 +8665,7 @@ dependencies = [
 name = "sc-tracing-proc-macro"
 version = "4.0.0-dev"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -8748,7 +8748,7 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -9174,7 +9174,7 @@ name = "sp-api-proc-macro"
 version = "4.0.0-dev"
 dependencies = [
  "blake2-rfc",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -9618,7 +9618,7 @@ name = "sp-npos-elections-solution-type"
 version = "4.0.0-dev"
 dependencies = [
  "parity-scale-codec",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "scale-info",
@@ -9709,7 +9709,7 @@ name = "sp-runtime-interface-proc-macro"
 version = "4.0.0-dev"
 dependencies = [
  "Inflector",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -10285,7 +10285,7 @@ dependencies = [
 name = "substrate-test-utils-derive"
 version = "0.10.0-dev"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml
index 25339eea14f12..3ffb70d50ef52 100644
--- a/client/chain-spec/derive/Cargo.toml
+++ b/client/chain-spec/derive/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 proc-macro2 = "1.0.29"
 quote = "1.0.10"
 syn = "1.0.81"
diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml
index c51c8764e6772..39c22bf856b26 100644
--- a/client/tracing/proc-macro/Cargo.toml
+++ b/client/tracing/proc-macro/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 proc-macro = true
 
 [dependencies]
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 proc-macro2 = "1.0.29"
 quote = { version = "1.0.10", features = ["proc-macro"] }
 syn = { version = "1.0.81", features = ["proc-macro", "full", "extra-traits", "parsing"] }
diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml
index 1250bbcd39056..fd306eb085727 100644
--- a/frame/staking/reward-curve/Cargo.toml
+++ b/frame/staking/reward-curve/Cargo.toml
@@ -18,7 +18,7 @@ proc-macro = true
 syn = { version = "1.0.81", features = ["full", "visit"] }
 quote = "1.0.10"
 proc-macro2 = "1.0.29"
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 
 [dev-dependencies]
 sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml
index 7b957be184014..c0a3b98632f59 100644
--- a/frame/support/procedural/tools/Cargo.toml
+++ b/frame/support/procedural/tools/Cargo.toml
@@ -16,4 +16,4 @@ frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" }
 proc-macro2 = "1.0.29"
 quote = "1.0.10"
 syn = { version = "1.0.81", features = ["full", "visit", "extra-traits"] }
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml
index a5df7aef322ab..c8eaae7c02abe 100644
--- a/primitives/api/proc-macro/Cargo.toml
+++ b/primitives/api/proc-macro/Cargo.toml
@@ -20,7 +20,7 @@ quote = "1.0.10"
 syn = { version = "1.0.81", features = ["full", "fold", "extra-traits", "visit"] }
 proc-macro2 = "1.0.29"
 blake2-rfc = { version = "0.2.18", default-features = false }
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 
 # Required for the doc tests
 [features]
diff --git a/primitives/npos-elections/solution-type/Cargo.toml b/primitives/npos-elections/solution-type/Cargo.toml
index 5d8d3890577a7..a64ca45ce8424 100644
--- a/primitives/npos-elections/solution-type/Cargo.toml
+++ b/primitives/npos-elections/solution-type/Cargo.toml
@@ -18,7 +18,7 @@ proc-macro = true
 syn = { version = "1.0.81", features = ["full", "visit"] }
 quote = "1.0"
 proc-macro2 = "1.0.29"
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 
 [dev-dependencies]
 parity-scale-codec = "2.3.1"
diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml
index cc5daa695bb0f..0be33c2c55ad1 100644
--- a/primitives/runtime-interface/proc-macro/Cargo.toml
+++ b/primitives/runtime-interface/proc-macro/Cargo.toml
@@ -20,4 +20,4 @@ syn = { version = "1.0.81", features = ["full", "visit", "fold", "extra-traits"]
 quote = "1.0.10"
 proc-macro2 = "1.0.29"
 Inflector = "0.11.4"
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml
index 967fc1e87a36e..0b894d92b2bcb 100644
--- a/test-utils/derive/Cargo.toml
+++ b/test-utils/derive/Cargo.toml
@@ -11,7 +11,7 @@ description = "Substrate test utilities macros"
 [dependencies]
 quote = "1.0.10"
 syn = { version = "1.0.81", features = ["full"] }
-proc-macro-crate = "1.0.0"
+proc-macro-crate = "1.1.0"
 proc-macro2 = "1.0.29"
 
 [lib]

From f78549161b9827ddde2519a12d1a279b83634f41 Mon Sep 17 00:00:00 2001
From: sandreim <54316454+sandreim@users.noreply.github.com>
Date: Thu, 11 Nov 2021 19:15:09 +0200
Subject: [PATCH 083/162] Add group name in task metrics (#10196)

* SpawnNamed: add new trait methods

Signed-off-by: Andrei Sandu 

* Implement new methods

Signed-off-by: Andrei Sandu 

* cargo fmt

Signed-off-by: Andrei Sandu 

* New approach - spaw() group param

Signed-off-by: Andrei Sandu 

* Update traits: SpawnNamed and SpawnEssentialNamed

Signed-off-by: Andrei Sandu 

* Update TaskManager tests

Signed-off-by: Andrei Sandu 

* Update test TaskExecutor

Signed-off-by: Andrei Sandu 

* Fix typo

Signed-off-by: Andrei Sandu 

* grunt work: fix spawn() calls

Signed-off-by: Andrei Sandu 

* cargo fmt

Signed-off-by: Andrei Sandu 

* remove old code

Signed-off-by: Andrei Sandu 

* cargo fmt - the right version

Signed-off-by: Andrei Sandu 

* Implement review feedback

- use Option group name in SpawnNamed methods
- switch to kebab case
- implement default group name
- add group name to some tasks

Signed-off-by: Andrei Sandu 
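
Illustrative call site with the new API (sketch only; the task name is made
up, the group name is one of those introduced in this patch):

	task_manager.spawn_handle().spawn(
		"my-worker",        // `task_name` label, must be a &'static str
		Some("networking"), // `task_group` label; None falls back to DEFAULT_GROUP_NAME
		async move { /* worker future */ },
	);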
---
 bin/node-template/node/src/service.rs         |   7 +-
 bin/node/cli/src/service.rs                   |  24 +++--
 bin/node/testing/src/bench.rs                 |  14 ++-
 .../basic-authorship/src/basic_authorship.rs  |   1 +
 .../common/src/import_queue/basic_queue.rs    |   6 +-
 client/executor/src/native_executor.rs        |   1 +
 client/offchain/src/lib.rs                    |   1 +
 client/rpc/src/lib.rs                         |   3 +-
 client/service/src/builder.rs                 |  20 ++--
 client/service/src/lib.rs                     |   2 +-
 client/service/src/task_manager/mod.rs        | 100 ++++++++++++------
 client/service/src/task_manager/tests.rs      |  52 ++++-----
 client/transaction-pool/src/api.rs            |   1 +
 client/transaction-pool/src/lib.rs            |   2 +-
 primitives/core/src/testing.rs                |  22 +++-
 primitives/core/src/traits.rs                 |  67 +++++++++---
 primitives/io/src/batch_verifier.rs           |   4 +-
 primitives/tasks/src/lib.rs                   |   1 +
 test-utils/test-runner/src/client.rs          |   4 +-
 19 files changed, 232 insertions(+), 100 deletions(-)

diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs
index d673a54a94882..c71336c330882 100644
--- a/bin/node-template/node/src/service.rs
+++ b/bin/node-template/node/src/service.rs
@@ -89,7 +89,7 @@ pub fn new_partial(
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
-		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
 		telemetry
 	});
 
@@ -289,7 +289,9 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 
 		// the AURA authoring task is considered essential, i.e. if it
 		// fails we take down the service with it.
-		task_manager.spawn_essential_handle().spawn_blocking("aura", aura);
+		task_manager
+			.spawn_essential_handle()
+			.spawn_blocking("aura", Some("block-authoring"), aura);
 	}
 
 	// if the node isn't actively participating in consensus then it doesn't
@@ -329,6 +331,7 @@ pub fn new_full(mut config: Configuration) -> Result
 		// if it fails we take down the service with it.
 		task_manager.spawn_essential_handle().spawn_blocking(
 			"grandpa-voter",
+			None,
 			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
 		);
 	}
diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs
index a022d501d0155..dee546bf07001 100644
--- a/bin/node/cli/src/service.rs
+++ b/bin/node/cli/src/service.rs
@@ -170,7 +170,7 @@ pub fn new_partial(
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
-		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		task_manager.spawn_handle().spawn("telemetry", None, worker.run());
 		telemetry
 	});
 
@@ -436,7 +436,11 @@ pub fn new_full_base(
 		};
 
 		let babe = sc_consensus_babe::start_babe(babe_config)?;
-		task_manager.spawn_essential_handle().spawn_blocking("babe-proposer", babe);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"babe-proposer",
+			Some("block-authoring"),
+			babe,
+		);
 	}
 
 	// Spawn authority discovery module.
@@ -463,9 +467,11 @@ pub fn new_full_base(
 				prometheus_registry.clone(),
 			);
 
-		task_manager
-			.spawn_handle()
-			.spawn("authority-discovery-worker", authority_discovery_worker.run());
+		task_manager.spawn_handle().spawn(
+			"authority-discovery-worker",
+			Some("networking"),
+			authority_discovery_worker.run(),
+		);
 	}
 
 	// if the node isn't actively participating in consensus then it doesn't
@@ -503,9 +509,11 @@ pub fn new_full_base(
 
 		// the GRANDPA voter task is considered infallible, i.e.
 		// if it fails we take down the service with it.
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)?);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"grandpa-voter",
+			None,
+			grandpa::run_grandpa_voter(grandpa_config)?,
+		);
 	}
 
 	network_starter.start_network();
diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs
index cf0a463cc3e99..5ee1ec998be4d 100644
--- a/bin/node/testing/src/bench.rs
+++ b/bin/node/testing/src/bench.rs
@@ -243,11 +243,21 @@ impl TaskExecutor {
 }
 
 impl SpawnNamed for TaskExecutor {
-	fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+	fn spawn(
+		&self,
+		_: &'static str,
+		_: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
 		self.pool.spawn_ok(future);
 	}
 
-	fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+	fn spawn_blocking(
+		&self,
+		_: &'static str,
+		_: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
 		self.pool.spawn_ok(future);
 	}
 }
diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs
index 573601a9102c5..305c4d753c1ea 100644
--- a/client/basic-authorship/src/basic_authorship.rs
+++ b/client/basic-authorship/src/basic_authorship.rs
@@ -270,6 +270,7 @@ where
 
 		spawn_handle.spawn_blocking(
 			"basic-authorship-proposer",
+			None,
 			Box::pin(async move {
 				// leave some time for evaluation and block finalization (33%)
 				let deadline = (self.now)() + max_duration - max_duration / 3;
diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs
index 9042c8798be4f..0461d7cf954cb 100644
--- a/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/client/consensus/common/src/import_queue/basic_queue.rs
@@ -89,7 +89,11 @@ impl BasicQueue {
 			metrics,
 		);
 
-		spawner.spawn_essential_blocking("basic-block-import-worker", future.boxed());
+		spawner.spawn_essential_blocking(
+			"basic-block-import-worker",
+			Some("block-import"),
+			future.boxed(),
+		);
 
 		Self { justification_sender, block_import_sender, result_port, _phantom: PhantomData }
 	}
diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs
index d912fc0fd13c9..62e76d559c0f2 100644
--- a/client/executor/src/native_executor.rs
+++ b/client/executor/src/native_executor.rs
@@ -399,6 +399,7 @@ impl RuntimeSpawn for RuntimeInstanceSpawn {
 		let scheduler = self.scheduler.clone();
 		self.scheduler.spawn(
 			"executor-extra-runtime-instance",
+			None,
 			Box::pin(async move {
 				let module = AssertUnwindSafe(module);
 
diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs
index a77fd17a2c8b8..2de24e10d927d 100644
--- a/client/offchain/src/lib.rs
+++ b/client/offchain/src/lib.rs
@@ -226,6 +226,7 @@ pub async fn notification_future(
 			if n.is_new_best {
 				spawner.spawn(
 					"offchain-on-block",
+					Some("offchain-worker"),
 					offchain
 						.on_block_imported(&n.header, network_provider.clone(), is_validator)
 						.boxed(),
diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs
index 832585db4854c..8f951632698fd 100644
--- a/client/rpc/src/lib.rs
+++ b/client/rpc/src/lib.rs
@@ -54,7 +54,8 @@ impl SubscriptionTaskExecutor {
 
 impl Spawn for SubscriptionTaskExecutor {
 	fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> {
-		self.0.spawn("substrate-rpc-subscription", future.map(drop).boxed());
+		self.0
+			.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed());
 		Ok(())
 	}
 
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs
index bcb05ce743701..88ba6282b5f4e 100644
--- a/client/service/src/builder.rs
+++ b/client/service/src/builder.rs
@@ -424,6 +424,7 @@ where
 	if let Some(offchain) = offchain_workers.clone() {
 		spawn_handle.spawn(
 			"offchain-notifications",
+			Some("offchain-worker"),
 			sc_offchain::notification_future(
 				config.role.is_authority(),
 				client.clone(),
@@ -505,11 +506,13 @@ where
 	// Inform the tx pool about imported and finalized blocks.
 	spawn_handle.spawn(
 		"txpool-notifications",
+		Some("transaction-pool"),
 		sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()),
 	);
 
 	spawn_handle.spawn(
 		"on-transaction-imported",
+		Some("transaction-pool"),
 		transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()),
 	);
 
@@ -520,6 +523,7 @@ where
 			let metrics = MetricsService::with_prometheus(telemetry.clone(), ®istry, &config)?;
 			spawn_handle.spawn(
 				"prometheus-endpoint",
+				None,
 				prometheus_endpoint::init_prometheus(port, registry).map(drop),
 			);
 
@@ -531,6 +535,7 @@ where
 	// Periodically updated metrics and telemetry updates.
 	spawn_handle.spawn(
 		"telemetry-periodic-send",
+		None,
 		metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()),
 	);
 
@@ -567,6 +572,7 @@ where
 	// Spawn informant task
 	spawn_handle.spawn(
 		"informant",
+		None,
 		sc_informant::build(
 			client.clone(),
 			network.clone(),
@@ -798,7 +804,7 @@ where
 				config.network.default_peers_set.in_peers as usize +
 					config.network.default_peers_set.out_peers as usize,
 			);
-			spawn_handle.spawn("block_request_handler", handler.run());
+			spawn_handle.spawn("block-request-handler", Some("networking"), handler.run());
 			protocol_config
 		}
 	};
@@ -815,7 +821,7 @@ where
 				config.network.default_peers_set.in_peers as usize +
 					config.network.default_peers_set.out_peers as usize,
 			);
-			spawn_handle.spawn("state_request_handler", handler.run());
+			spawn_handle.spawn("state-request-handler", Some("networking"), handler.run());
 			protocol_config
 		}
 	};
@@ -828,7 +834,7 @@ where
 			// Allow both outgoing and incoming requests.
 			let (handler, protocol_config) =
 				WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone());
-			spawn_handle.spawn("warp_sync_request_handler", handler.run());
+			spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run());
 			protocol_config
 		};
 		(provider, protocol_config)
@@ -842,7 +848,7 @@ where
 			// Allow both outgoing and incoming requests.
 			let (handler, protocol_config) =
 				LightClientRequestHandler::new(&protocol_id, client.clone());
-			spawn_handle.spawn("light_client_request_handler", handler.run());
+			spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run());
 			protocol_config
 		}
 	};
@@ -852,13 +858,13 @@ where
 		executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
 			Some(Box::new(move |fut| {
-				spawn_handle.spawn("libp2p-node", fut);
+				spawn_handle.spawn("libp2p-node", Some("networking"), fut);
 			}))
 		},
 		transactions_handler_executor: {
 			let spawn_handle = Clone::clone(&spawn_handle);
 			Box::new(move |fut| {
-				spawn_handle.spawn("network-transactions-handler", fut);
+				spawn_handle.spawn("network-transactions-handler", Some("networking"), fut);
 			})
 		},
 		network_config: config.network.clone(),
@@ -920,7 +926,7 @@ where
 	// issue, and ideally we would like to fix the network future to take as little time as
 	// possible, but we also take the extra harm-prevention measure to execute the networking
 	// future using `spawn_blocking`.
-	spawn_handle.spawn_blocking("network-worker", async move {
+	spawn_handle.spawn_blocking("network-worker", Some("networking"), async move {
 		if network_start_rx.await.is_err() {
 			log::warn!(
 				"The NetworkStart returned as part of `build_network` has been silently dropped"
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs
index ce77be5a7c1d9..bd43d4c464ea0 100644
--- a/client/service/src/lib.rs
+++ b/client/service/src/lib.rs
@@ -75,7 +75,7 @@ pub use sc_transaction_pool::Options as TransactionPoolOptions;
 pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool};
 #[doc(hidden)]
 pub use std::{ops::Deref, result::Result, sync::Arc};
-pub use task_manager::{SpawnTaskHandle, TaskManager};
+pub use task_manager::{SpawnTaskHandle, TaskManager, DEFAULT_GROUP_NAME};
 
 const DEFAULT_PROTOCOL_ID: &str = "sup";
 
diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs
index c827aa71dac2c..64c00226073c7 100644
--- a/client/service/src/task_manager/mod.rs
+++ b/client/service/src/task_manager/mod.rs
@@ -38,6 +38,9 @@ mod prometheus_future;
 #[cfg(test)]
 mod tests;
 
+/// Default task group name.
+pub const DEFAULT_GROUP_NAME: &'static str = "default";
+
 /// A handle for spawning tasks in the service.
 #[derive(Clone)]
 pub struct SpawnTaskHandle {
@@ -48,31 +51,39 @@ pub struct SpawnTaskHandle {
 }
 
 impl SpawnTaskHandle {
-	/// Spawns the given task with the given name.
+	/// Spawns the given task with the given name and an optional group name.
+	/// If no group is specified, `DEFAULT_GROUP_NAME` is used.
 	///
-	/// Note that the `name` is a `&'static str`. The reason for this choice is that statistics
-	/// about this task are getting reported to the Prometheus endpoint (if enabled), and that
-	/// therefore the set of possible task names must be bounded.
+	/// Note that `name` and `group` are `&'static str`s. The reason for this choice is that
+	/// statistics about this task are reported to the Prometheus endpoint (if enabled), and
+	/// the set of possible task names must therefore be bounded.
 	///
 	/// In other words, it would be a bad idea for someone to do for example
 	/// `spawn(format!("{:?}", some_public_key))`.
-	pub fn spawn(&self, name: &'static str, task: impl Future<Output = ()> + Send + 'static) {
-		self.spawn_inner(name, task, TaskType::Async)
+	pub fn spawn(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		task: impl Future<Output = ()> + Send + 'static,
+	) {
+		self.spawn_inner(name, group, task, TaskType::Async)
 	}
 
 	/// Spawns the blocking task with the given name. See also `spawn`.
 	pub fn spawn_blocking(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		task: impl Future<Output = ()> + Send + 'static,
 	) {
-		self.spawn_inner(name, task, TaskType::Blocking)
+		self.spawn_inner(name, group, task, TaskType::Blocking)
 	}
 
 	/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
 	fn spawn_inner(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		task: impl Future<Output = ()> + Send + 'static,
 		task_type: TaskType,
 	) {
@@ -83,21 +94,23 @@ impl SpawnTaskHandle {
 
 		let on_exit = self.on_exit.clone();
 		let metrics = self.metrics.clone();
+		// If no group is specified use default.
+		let group = group.unwrap_or(DEFAULT_GROUP_NAME);
 
 		// Note that we increase the started counter here and not within the future. This way,
 		// we could properly visualize on Prometheus situations where the spawning doesn't work.
 		if let Some(metrics) = &self.metrics {
-			metrics.tasks_spawned.with_label_values(&[name]).inc();
+			metrics.tasks_spawned.with_label_values(&[name, group]).inc();
 			// We do a dummy increase in order for the task to show up in metrics.
-			metrics.tasks_ended.with_label_values(&[name, "finished"]).inc_by(0);
+			metrics.tasks_ended.with_label_values(&[name, "finished", group]).inc_by(0);
 		}
 
 		let future = async move {
 			if let Some(metrics) = metrics {
 				// Add some wrappers around `task`.
 				let task = {
-					let poll_duration = metrics.poll_duration.with_label_values(&[name]);
-					let poll_start = metrics.poll_start.with_label_values(&[name]);
+					let poll_duration = metrics.poll_duration.with_label_values(&[name, group]);
+					let poll_start = metrics.poll_start.with_label_values(&[name, group]);
 					let inner =
 						prometheus_future::with_poll_durations(poll_duration, poll_start, task);
 					// The logic of `AssertUnwindSafe` here is ok considering that we throw
@@ -108,15 +121,15 @@ impl SpawnTaskHandle {
 
 				match select(on_exit, task).await {
 					Either::Right((Err(payload), _)) => {
-						metrics.tasks_ended.with_label_values(&[name, "panic"]).inc();
+						metrics.tasks_ended.with_label_values(&[name, "panic", group]).inc();
 						panic::resume_unwind(payload)
 					},
 					Either::Right((Ok(()), _)) => {
-						metrics.tasks_ended.with_label_values(&[name, "finished"]).inc();
+						metrics.tasks_ended.with_label_values(&[name, "finished", group]).inc();
 					},
 					Either::Left(((), _)) => {
 						// The `on_exit` has triggered.
-						metrics.tasks_ended.with_label_values(&[name, "interrupted"]).inc();
+						metrics.tasks_ended.with_label_values(&[name, "interrupted", group]).inc();
 					},
 				}
 			} else {
@@ -141,12 +154,22 @@ impl SpawnTaskHandle {
 }
 
 impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
-	fn spawn_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) {
-		self.spawn_blocking(name, future);
+	fn spawn_blocking(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: BoxFuture<'static, ()>,
+	) {
+		self.spawn_inner(name, group, future, TaskType::Blocking)
 	}
 
-	fn spawn(&self, name: &'static str, future: BoxFuture<'static, ()>) {
-		self.spawn(name, future);
+	fn spawn(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: BoxFuture<'static, ()>,
+	) {
+		self.spawn_inner(name, group, future, TaskType::Async)
 	}
 }
 
@@ -172,8 +195,13 @@ impl SpawnEssentialTaskHandle {
 	/// Spawns the given task with the given name.
 	///
 	/// See also [`SpawnTaskHandle::spawn`].
-	pub fn spawn(&self, name: &'static str, task: impl Future<Output = ()> + Send + 'static) {
-		self.spawn_inner(name, task, TaskType::Async)
+	pub fn spawn(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		task: impl Future<Output = ()> + Send + 'static,
+	) {
+		self.spawn_inner(name, group, task, TaskType::Async)
 	}
 
 	/// Spawns the blocking task with the given name.
@@ -182,14 +210,16 @@ impl SpawnEssentialTaskHandle {
 	pub fn spawn_blocking(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		task: impl Future<Output = ()> + Send + 'static,
 	) {
-		self.spawn_inner(name, task, TaskType::Blocking)
+		self.spawn_inner(name, group, task, TaskType::Blocking)
 	}
 
 	fn spawn_inner(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		task: impl Future<Output = ()> + Send + 'static,
 		task_type: TaskType,
 	) {
@@ -199,17 +229,27 @@ impl SpawnEssentialTaskHandle {
 			let _ = essential_failed.close_channel();
 		});
 
-		let _ = self.inner.spawn_inner(name, essential_task, task_type);
+		let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
 	}
 }
 
 impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
-	fn spawn_essential_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) {
-		self.spawn_blocking(name, future);
+	fn spawn_essential_blocking(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: BoxFuture<'static, ()>,
+	) {
+		self.spawn_blocking(name, group, future);
 	}
 
-	fn spawn_essential(&self, name: &'static str, future: BoxFuture<'static, ()>) {
-		self.spawn(name, future);
+	fn spawn_essential(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: BoxFuture<'static, ()>,
+	) {
+		self.spawn(name, group, future);
 	}
 }
 
@@ -396,28 +436,28 @@ impl Metrics {
 					buckets: exponential_buckets(0.001, 4.0, 9)
 						.expect("function parameters are constant and always valid; qed"),
 				},
-				&["task_name"]
+				&["task_name", "task_group"]
 			)?, registry)?,
 			poll_start: register(CounterVec::new(
 				Opts::new(
 					"tasks_polling_started_total",
 					"Total number of times we started invoking Future::poll"
 				),
-				&["task_name"]
+				&["task_name", "task_group"]
 			)?, registry)?,
 			tasks_spawned: register(CounterVec::new(
 				Opts::new(
 					"tasks_spawned_total",
 					"Total number of tasks that have been spawned on the Service"
 				),
-				&["task_name"]
+				&["task_name", "task_group"]
 			)?, registry)?,
 			tasks_ended: register(CounterVec::new(
 				Opts::new(
 					"tasks_ended_total",
 					"Total number of tasks for which Future::poll has returned Ready(()) or panicked"
 				),
-				&["task_name", "reason"]
+				&["task_name", "reason", "task_group"]
 			)?, registry)?,
 		})
 	}
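
With the extra `task_group` label wired through above, dashboards can aggregate per group instead of per individual task name. A standalone sketch of the resulting label scheme, using the prometheus crate directly rather than the substrate-prometheus-endpoint re-exports in this file (the task names are examples):

use prometheus::{CounterVec, Opts, Registry};

// Register a counter with the same two labels as `tasks_spawned_total` and
// bump it for two tasks in different groups.
fn demo_task_group_labels() -> Result<(), prometheus::Error> {
	let registry = Registry::new();
	let spawned = CounterVec::new(
		Opts::new("tasks_spawned_total", "Total number of tasks spawned"),
		&["task_name", "task_group"],
	)?;
	registry.register(Box::new(spawned.clone()))?;

	// Each (task_name, task_group) pair is its own time series; tasks spawned
	// with `group: None` land in the "default" group.
	spawned.with_label_values(&["libp2p-node", "networking"]).inc();
	spawned.with_label_values(&["informant", "default"]).inc();
	Ok(())
}

A query such as `sum by (task_group) (rate(tasks_ended_total[5m]))` then gives a per-group completion rate without enumerating task names.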
diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs
index 291d71ebaf03b..75092ff2ae62e 100644
--- a/client/service/src/task_manager/tests.rs
+++ b/client/service/src/task_manager/tests.rs
@@ -96,8 +96,8 @@ fn ensure_tasks_are_awaited_on_shutdown() {
 	let task_manager = new_task_manager(handle);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 2);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
@@ -115,7 +115,7 @@ fn ensure_keep_alive_during_shutdown() {
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
 	task_manager.keep_alive(drop_tester.new_ref());
-	spawn_handle.spawn("task1", run_background_task(()));
+	spawn_handle.spawn("task1", None, run_background_task(()));
 	assert_eq!(drop_tester, 1);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
@@ -134,10 +134,12 @@ fn ensure_blocking_futures_are_awaited_on_shutdown() {
 	let drop_tester = DropTester::new();
 	spawn_handle.spawn(
 		"task1",
+		None,
 		run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()),
 	);
 	spawn_handle.spawn(
 		"task2",
+		None,
 		run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()),
 	);
 	assert_eq!(drop_tester, 2);
@@ -156,14 +158,14 @@ fn ensure_no_task_can_be_spawn_after_terminate() {
 	let mut task_manager = new_task_manager(handle);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 2);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
 	assert_eq!(drop_tester, 2);
 	task_manager.terminate();
-	spawn_handle.spawn("task3", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task3", None, run_background_task(drop_tester.new_ref()));
 	runtime.block_on(task_manager.clean_shutdown());
 	drop_tester.wait_on_drop();
 }
@@ -176,8 +178,8 @@ fn ensure_task_manager_future_ends_when_task_manager_terminated() {
 	let mut task_manager = new_task_manager(handle);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 2);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
@@ -197,13 +199,13 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() {
 	let spawn_handle = task_manager.spawn_handle();
 	let spawn_essential_handle = task_manager.spawn_essential_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 2);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
 	assert_eq!(drop_tester, 2);
-	spawn_essential_handle.spawn("task3", async { panic!("task failed") });
+	spawn_essential_handle.spawn("task3", None, async { panic!("task failed") });
 	runtime
 		.block_on(task_manager.future())
 		.expect_err("future()'s Result must be Err");
@@ -226,10 +228,10 @@ fn ensure_children_tasks_ends_when_task_manager_terminated() {
 	task_manager.add_child(child_2);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 4);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
@@ -255,15 +257,15 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails()
 	task_manager.add_child(child_2);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 4);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
 	assert_eq!(drop_tester, 4);
-	spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") });
+	spawn_essential_handle_child_1.spawn("task5", None, async { panic!("task failed") });
 	runtime
 		.block_on(task_manager.future())
 		.expect_err("future()'s Result must be Err");
@@ -286,15 +288,15 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() {
 	task_manager.add_child(child_2);
 	let spawn_handle = task_manager.spawn_handle();
 	let drop_tester = DropTester::new();
-	spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref()));
-	spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref()));
-	spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref()));
+	spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref()));
 	assert_eq!(drop_tester, 4);
 	// allow the tasks to even start
 	runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await });
 	assert_eq!(drop_tester, 4);
-	spawn_handle_child_1.spawn("task5", async { panic!("task failed") });
+	spawn_handle_child_1.spawn("task5", None, async { panic!("task failed") });
 	runtime.block_on(async {
 		let t1 = task_manager.future().fuse();
 		let t2 = tokio::time::sleep(Duration::from_secs(3)).fuse();
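
The pattern in the hunks above is purely mechanical: every `spawn` and
`spawn_essential` call gains a second argument carrying an optional task
group, and the existing tests simply pass `None`. A minimal sketch of the
new call shape, assuming a `spawn_handle` obtained from a `TaskManager` as
in these tests; the task name "my-task" and group "my-group" are
illustrative, not taken from this patch:

    // Ungrouped task: `None` keeps the pre-patch behaviour.
    spawn_handle.spawn("my-task", None, async { /* background work */ });

    // Grouped task: the group, together with the name, identifies the
    // future in tracing (see the `SpawnNamed` docs updated below).
    spawn_handle.spawn("my-task", Some("my-group"), async { /* background work */ });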
diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs
index a735c67d846ce..8af0ea98f8100 100644
--- a/client/transaction-pool/src/api.rs
+++ b/client/transaction-pool/src/api.rs
@@ -64,6 +64,7 @@ fn spawn_validation_pool_task(
 ) {
 	spawner.spawn_essential_blocking(
 		name,
+		Some("transaction-pool"),
 		async move {
 			loop {
 				let task = receiver.lock().await.next().await;
diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs
index 8af73c3fe5b48..3565cb52ad87b 100644
--- a/client/transaction-pool/src/lib.rs
+++ b/client/transaction-pool/src/lib.rs
@@ -217,7 +217,7 @@ where
 		};
 
 		if let Some(background_task) = background_task {
-			spawner.spawn_essential("txpool-background", background_task);
+			spawner.spawn_essential("txpool-background", Some("transaction-pool"), background_task);
 		}
 
 		Self {
diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs
index a7fff0def83f2..a40a37804c031 100644
--- a/primitives/core/src/testing.rs
+++ b/primitives/core/src/testing.rs
@@ -152,10 +152,20 @@ impl Default for TaskExecutor {
 
 #[cfg(feature = "std")]
 impl crate::traits::SpawnNamed for TaskExecutor {
-	fn spawn_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+	fn spawn_blocking(
+		&self,
+		_name: &'static str,
+		_group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
 		self.0.spawn_ok(future);
 	}
-	fn spawn(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+	fn spawn(
+		&self,
+		_name: &'static str,
+		_group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
 		self.0.spawn_ok(future);
 	}
 }
@@ -165,11 +175,17 @@ impl crate::traits::SpawnEssentialNamed for TaskExecutor {
 	fn spawn_essential_blocking(
 		&self,
 		_: &'static str,
+		_: Option<&'static str>,
 		future: futures::future::BoxFuture<'static, ()>,
 	) {
 		self.0.spawn_ok(future);
 	}
-	fn spawn_essential(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) {
+	fn spawn_essential(
+		&self,
+		_: &'static str,
+		_: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
 		self.0.spawn_ok(future);
 	}
 }
diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs
index 47639f9d87ba6..e3d7d8e283e21 100644
--- a/primitives/core/src/traits.rs
+++ b/primitives/core/src/traits.rs
@@ -190,58 +190,91 @@ sp_externalities::decl_extension! {
 	pub struct RuntimeSpawnExt(Box<dyn RuntimeSpawn>);
 }
 
-/// Something that can spawn tasks (blocking and non-blocking) with an assigned name.
+/// Something that can spawn tasks (blocking and non-blocking) with an assigned name
+/// and optional group.
 #[dyn_clonable::clonable]
 pub trait SpawnNamed: Clone + Send + Sync {
 	/// Spawn the given blocking future.
 	///
-	/// The given `name` is used to identify the future in tracing.
-	fn spawn_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>);
+	/// The given `group` and `name` is used to identify the future in tracing.
+	fn spawn_blocking(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	);
 	/// Spawn the given non-blocking future.
 	///
-	/// The given `name` is used to identify the future in tracing.
-	fn spawn(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>);
+	/// The given `group` and `name` is used to identify the future in tracing.
+	fn spawn(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	);
 }
 
 impl SpawnNamed for Box<dyn SpawnNamed> {
-	fn spawn_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) {
-		(**self).spawn_blocking(name, future)
+	fn spawn_blocking(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
+		(**self).spawn_blocking(name, group, future)
 	}
-
-	fn spawn(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) {
-		(**self).spawn(name, future)
+	fn spawn(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
+		(**self).spawn(name, group, future)
 	}
 }
 
-/// Something that can spawn essential tasks (blocking and non-blocking) with an assigned name.
+/// Something that can spawn essential tasks (blocking and non-blocking) with an assigned name
+/// and optional group.
 ///
 /// Essential tasks are special tasks that should take down the node when they end.
 #[dyn_clonable::clonable]
 pub trait SpawnEssentialNamed: Clone + Send + Sync {
 	/// Spawn the given blocking future.
 	///
-	/// The given `name` is used to identify the future in tracing.
+	/// The given `group` and `name` is used to identify the future in tracing.
 	fn spawn_essential_blocking(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		future: futures::future::BoxFuture<'static, ()>,
 	);
 	/// Spawn the given non-blocking future.
 	///
-	/// The given `name` is used to identify the future in tracing.
-	fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>);
+	/// The given `group` and `name` is used to identify the future in tracing.
+	fn spawn_essential(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	);
 }
 
 impl SpawnEssentialNamed for Box<dyn SpawnEssentialNamed> {
 	fn spawn_essential_blocking(
 		&self,
 		name: &'static str,
+		group: Option<&'static str>,
 		future: futures::future::BoxFuture<'static, ()>,
 	) {
-		(**self).spawn_essential_blocking(name, future)
+		(**self).spawn_essential_blocking(name, group, future)
 	}
 
-	fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) {
-		(**self).spawn_essential(name, future)
+	fn spawn_essential(
+		&self,
+		name: &'static str,
+		group: Option<&'static str>,
+		future: futures::future::BoxFuture<'static, ()>,
+	) {
+		(**self).spawn_essential(name, group, future)
 	}
 }
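
For implementers of these traits the change is additive: accept the group
and forward it (or record it) alongside the name. A minimal sketch over a
`futures` thread pool, in the spirit of the `TaskExecutor` test helper
above; the `LoggingSpawner` type and its `log::debug!` calls are
illustrative, not part of this patch:

    use sp_core::traits::SpawnNamed;

    #[derive(Clone)]
    struct LoggingSpawner(futures::executor::ThreadPool);

    impl SpawnNamed for LoggingSpawner {
        fn spawn_blocking(
            &self,
            name: &'static str,
            group: Option<&'static str>,
            future: futures::future::BoxFuture<'static, ()>,
        ) {
            // Record both identifiers before handing the future off.
            log::debug!("spawning blocking task `{}` (group {:?})", name, group);
            self.0.spawn_ok(future);
        }

        fn spawn(
            &self,
            name: &'static str,
            group: Option<&'static str>,
            future: futures::future::BoxFuture<'static, ()>,
        ) {
            log::debug!("spawning task `{}` (group {:?})", name, group);
            self.0.spawn_ok(future);
        }
    }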
diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs
index b6da1d85907bd..05c8a63694eb3 100644
--- a/primitives/io/src/batch_verifier.rs
+++ b/primitives/io/src/batch_verifier.rs
@@ -74,6 +74,7 @@ impl BatchVerifier {
 
 		self.scheduler.spawn(
 			name,
+			None,
 			async move {
 				if !f() {
 					invalid_clone.store(true, AtomicOrdering::Relaxed);
@@ -177,7 +178,8 @@ impl BatchVerifier {
 		if pending.len() > 0 {
 			let (sender, receiver) = std::sync::mpsc::channel();
 			self.scheduler.spawn(
-				"substrate_batch_verify_join",
+				"substrate-batch-verify-join",
+				None,
 				async move {
 					futures::future::join_all(pending).await;
 					sender.send(()).expect(
diff --git a/primitives/tasks/src/lib.rs b/primitives/tasks/src/lib.rs
index e9c80ae5ff4c8..c874bb98e1ae6 100644
--- a/primitives/tasks/src/lib.rs
+++ b/primitives/tasks/src/lib.rs
@@ -95,6 +95,7 @@ mod inner {
 		let extra_scheduler = scheduler.clone();
 		scheduler.spawn(
 			"parallel-runtime-spawn",
+			Some("substrate-runtime"),
 			Box::pin(async move {
 				let result = match crate::new_async_externalities(extra_scheduler) {
 					Ok(mut ext) => {
diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs
index 58c4cf6503a93..27c04c40fe6fe 100644
--- a/test-utils/test-runner/src/client.rs
+++ b/test-utils/test-runner/src/client.rs
@@ -235,7 +235,9 @@ where
 	});
 
 	// spawn the authorship task as an essential task.
-	task_manager.spawn_essential_handle().spawn("manual-seal", authorship_future);
+	task_manager
+		.spawn_essential_handle()
+		.spawn("manual-seal", None, authorship_future);
 
 	network_starter.start_network();
 	let rpc_handler = rpc_handlers.io_handler();

From dc899259640256c0c545b262dd3388b126d52b01 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Nov 2021 19:43:04 +0100
Subject: [PATCH 084/162] Bump git2 from 0.13.22 to 0.13.23 (#10238)

Bumps [git2](https://github.com/rust-lang/git2-rs) from 0.13.22 to 0.13.23.
- [Release notes](https://github.com/rust-lang/git2-rs/releases)
- [Commits](https://github.com/rust-lang/git2-rs/compare/0.13.22...0.13.23)

---
updated-dependencies:
- dependency-name: git2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock                           | 8 ++++----
 utils/frame/generate-bags/Cargo.toml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 423ec1de4e7de..5d0251bca6316 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2479,9 +2479,9 @@ dependencies = [
 
 [[package]]
 name = "git2"
-version = "0.13.22"
+version = "0.13.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c1cbbfc9a1996c6af82c2b4caf828d2c653af4fcdbb0e5674cc966eee5a4197"
+checksum = "2a8057932925d3a9d9e4434ea016570d37420ddb1ceed45a174d577f24ed6700"
 dependencies = [
  "bitflags",
  "libc",
@@ -3313,9 +3313,9 @@ checksum = "869d572136620d55835903746bcb5cdc54cb2851fd0aeec53220b4bb65ef3013"
 
 [[package]]
 name = "libgit2-sys"
-version = "0.12.23+1.2.0"
+version = "0.12.24+1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29730a445bae719db3107078b46808cc45a5b7a6bae3f31272923af969453356"
+checksum = "ddbd6021eef06fb289a8f54b3c2acfdd85ff2a585dfbb24b8576325373d2152c"
 dependencies = [
  "cc",
  "libc",
diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml
index 225584b69069e..4bc27021b4c57 100644
--- a/utils/frame/generate-bags/Cargo.toml
+++ b/utils/frame/generate-bags/Cargo.toml
@@ -21,6 +21,6 @@ sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" }
 
 # third party
 chrono = { version = "0.4.19" }
-git2 = { version = "0.13.20", default-features = false }
+git2 = { version = "0.13.23", default-features = false }
 num-format = { version = "0.4.0" }
 structopt = "0.3.21"

From e5abe5566e5a76b3deb9b6c2766e88d32ac07eb0 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere 
Date: Thu, 11 Nov 2021 23:53:43 +0100
Subject: [PATCH 085/162] Warn about usage of pallet collective set members
 call. (#10156)

* warn

* Apply suggestions from code review

Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com>

* fmt

Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com>
---
 frame/collective/src/lib.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs
index 89d4c8a150c36..2797d01ffcdba 100644
--- a/frame/collective/src/lib.rs
+++ b/frame/collective/src/lib.rs
@@ -345,6 +345,13 @@ pub mod pallet {
 		/// NOTE: Does not enforce the expected `MaxMembers` limit on the amount of members, but
 		///       the weight estimations rely on it to estimate dispatchable weight.
 		///
+		/// # WARNING:
+		///
+		/// The `pallet-collective` can also be managed by logic outside of the pallet through the
+		/// implementation of the trait [`ChangeMembers`].
+		/// Any call to `set_members` must be careful that the member set doesn't get out of sync
+		/// with other logic managing the member set.
+		///
 		/// # 
 		/// ## Weight
 		/// - `O(MP + N)` where:

From 2208ac43327d4869b3fd3fcfa44eb1a9ba11bb01 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 11 Nov 2021 23:32:27 +0000
Subject: [PATCH 086/162] Bump thiserror from 1.0.26 to 1.0.30 (#10240)

Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.26 to 1.0.30.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.26...1.0.30)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] 

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 Cargo.lock                             | 8 ++++----
 client/allocator/Cargo.toml            | 2 +-
 client/api/Cargo.toml                  | 2 +-
 client/cli/Cargo.toml                  | 2 +-
 client/consensus/common/Cargo.toml     | 2 +-
 client/consensus/slots/Cargo.toml      | 2 +-
 client/consensus/uncles/Cargo.toml     | 2 +-
 client/executor/common/Cargo.toml      | 2 +-
 client/service/Cargo.toml              | 2 +-
 client/sync-state-rpc/Cargo.toml       | 2 +-
 client/telemetry/Cargo.toml            | 2 +-
 client/tracing/Cargo.toml              | 2 +-
 client/transaction-pool/Cargo.toml     | 2 +-
 client/transaction-pool/api/Cargo.toml | 2 +-
 primitives/api/Cargo.toml              | 2 +-
 primitives/blockchain/Cargo.toml       | 2 +-
 primitives/consensus/common/Cargo.toml | 2 +-
 primitives/core/Cargo.toml             | 2 +-
 primitives/inherents/Cargo.toml        | 2 +-
 primitives/state-machine/Cargo.toml    | 2 +-
 primitives/timestamp/Cargo.toml        | 2 +-
 primitives/version/Cargo.toml          | 2 +-
 22 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5d0251bca6316..5d2fa99ca55a5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10461,18 +10461,18 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "1.0.26"
+version = "1.0.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2"
+checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.26"
+version = "1.0.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745"
+checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml
index 6d324b09acde5..44facf0ad8892 100644
--- a/client/allocator/Cargo.toml
+++ b/client/allocator/Cargo.toml
@@ -17,4 +17,4 @@ targets = ["x86_64-unknown-linux-gnu"]
 sp-core = { version = "4.0.0-dev", path = "../../primitives/core" }
 sp-wasm-interface = { version = "4.0.0-dev", path = "../../primitives/wasm-interface" }
 log = "0.4.11"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml
index 431d6e2fb0157..39fe804a1415d 100644
--- a/client/api/Cargo.toml
+++ b/client/api/Cargo.toml
@@ -41,4 +41,4 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.
 [dev-dependencies]
 sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" }
 substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" }
-thiserror = "1.0.21"
+thiserror = "1.0.30"
diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml
index 6107206be34e8..9dc7ff730600b 100644
--- a/client/cli/Cargo.toml
+++ b/client/cli/Cargo.toml
@@ -42,7 +42,7 @@ structopt = "0.3.8"
 sc-tracing = { version = "4.0.0-dev", path = "../tracing" }
 chrono = "0.4.10"
 serde = "1.0.126"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 rpassword = "5.0.0"
 
 [dev-dependencies]
diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml
index 3f8380d3f81bc..c26e250edf24c 100644
--- a/client/consensus/common/Cargo.toml
+++ b/client/consensus/common/Cargo.toml
@@ -13,7 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 libp2p = { version = "0.39.1", default-features = false }
 log = "0.4.8"
 futures = { version = "0.3.1", features = ["thread-pool"] }
diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml
index 5c5f1bdfa68cd..ff4fdf041b4fe 100644
--- a/client/consensus/slots/Cargo.toml
+++ b/client/consensus/slots/Cargo.toml
@@ -31,7 +31,7 @@ sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
 futures = "0.3.9"
 futures-timer = "3.0.1"
 log = "0.4.11"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 async-trait = "0.1.50"
 
 [dev-dependencies]
diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml
index f644d64c7bbeb..bc5f1f25838c9 100644
--- a/client/consensus/uncles/Cargo.toml
+++ b/client/consensus/uncles/Cargo.toml
@@ -16,4 +16,4 @@ targets = ["x86_64-unknown-linux-gnu"]
 sc-client-api = { version = "4.0.0-dev", path = "../../api" }
 sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
 sp-authorship = { version = "4.0.0-dev", path = "../../../primitives/authorship" }
-thiserror = "1.0.21"
+thiserror = "1.0.30"
diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml
index 2d9f4672768e9..276fe9a8380b4 100644
--- a/client/executor/common/Cargo.toml
+++ b/client/executor/common/Cargo.toml
@@ -23,7 +23,7 @@ sc-allocator = { version = "4.1.0-dev", path = "../../allocator" }
 sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-interface" }
 sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" }
 sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" }
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 environmental = "1.1.3"
 
 wasmer = { version = "1.0", optional = true }
diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml
index c090495fe04ec..38729fd4ba5ce 100644
--- a/client/service/Cargo.toml
+++ b/client/service/Cargo.toml
@@ -22,7 +22,7 @@ wasmtime = ["sc-executor/wasmtime"]
 test-helpers = []
 
 [dependencies]
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 futures = "0.3.16"
 jsonrpc-pubsub = "18.0"
 jsonrpc-core = "18.0"
diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml
index a9503b992ab37..6d8d95954629c 100644
--- a/client/sync-state-rpc/Cargo.toml
+++ b/client/sync-state-rpc/Cargo.toml
@@ -13,7 +13,7 @@ readme = "README.md"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 jsonrpc-core = "18.0.0"
 jsonrpc-core-client = "18.0.0"
 jsonrpc-derive = "18.0.0"
diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml
index 5a25aca29b60b..744a0610a07e3 100644
--- a/client/telemetry/Cargo.toml
+++ b/client/telemetry/Cargo.toml
@@ -25,4 +25,4 @@ rand = "0.7.2"
 serde = { version = "1.0.126", features = ["derive"] }
 serde_json = "1.0.68"
 chrono = "0.4.19"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml
index 8640208ad3427..6a969b33deb1a 100644
--- a/client/tracing/Cargo.toml
+++ b/client/tracing/Cargo.toml
@@ -24,7 +24,7 @@ parking_lot = "0.11.1"
 regex = "1.5.4"
 rustc-hash = "1.1.0"
 serde = "1.0.126"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 tracing = "0.1.29"
 tracing-log = "0.1.2"
 tracing-subscriber = { version = "0.2.25", features = ["parking_lot"] }
diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml
index d26969cf1e092..3d2a450c4101a 100644
--- a/client/transaction-pool/Cargo.toml
+++ b/client/transaction-pool/Cargo.toml
@@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 codec = { package = "parity-scale-codec", version = "2.0.0" }
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 futures = "0.3.16"
 intervalier = "0.4.0"
 log = "0.4.8"
diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml
index 176624611fbf5..1ab1ef5bb4a16 100644
--- a/client/transaction-pool/api/Cargo.toml
+++ b/client/transaction-pool/api/Cargo.toml
@@ -12,7 +12,7 @@ description = "Transaction pool client facing API."
 futures = { version = "0.3.1"  }
 log = { version = "0.4.8" }
 serde = { version = "1.0.126", features = ["derive"] }
-thiserror = { version = "1.0.21" }
+thiserror = { version = "1.0.30" }
 sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
 
 derive_more = { version = "0.99.11" }
diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml
index 65da54b87753d..5723f9cf11b51 100644
--- a/primitives/api/Cargo.toml
+++ b/primitives/api/Cargo.toml
@@ -21,7 +21,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runti
 sp-version = { version = "4.0.0-dev", default-features = false, path = "../version" }
 sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" }
 hash-db = { version = "0.15.2", optional = true }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 
 log = { version = "0.4.14", default-features = false }
 
diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml
index de73af10966e1..02e74a63d9590 100644
--- a/primitives/blockchain/Cargo.toml
+++ b/primitives/blockchain/Cargo.toml
@@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 log = "0.4.11"
 lru = "0.7.0"
 parking_lot = "0.11.1"
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 futures = "0.3.9"
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 sp-consensus = { version = "0.10.0-dev", path = "../consensus/common" }
diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml
index 735b1e8eb095d..08e50a954288c 100644
--- a/primitives/consensus/common/Cargo.toml
+++ b/primitives/consensus/common/Cargo.toml
@@ -27,7 +27,7 @@ futures-timer = "3.0.1"
 sp-std = { version = "4.0.0-dev", path = "../../std" }
 sp-version = { version = "4.0.0-dev", path = "../../version" }
 sp-runtime = { version = "4.0.0-dev", path = "../../runtime" }
-thiserror = "1.0.21"
+thiserror = "1.0.30"
 
 [dev-dependencies]
 futures = "0.3.9"
diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml
index 48749904763fd..eaf1d26c2898c 100644
--- a/primitives/core/Cargo.toml
+++ b/primitives/core/Cargo.toml
@@ -48,7 +48,7 @@ parity-util-mem = { version = "0.10.2", default-features = false, features = [
 ] }
 futures = { version = "0.3.1", optional = true }
 dyn-clonable = { version = "0.9.0", optional = true }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 bitflags = "1.3"
 
 # full crypto
diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml
index d52140d94ed31..3efd6ab032131 100644
--- a/primitives/inherents/Cargo.toml
+++ b/primitives/inherents/Cargo.toml
@@ -19,7 +19,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
 sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" }
 sp-runtime = { version = "4.0.0-dev", path = "../runtime", optional = true }
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 impl-trait-for-tuples = "0.2.0"
 async-trait = { version = "0.1.50", optional = true }
 
diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml
index c919d7fbef6ad..783837a6442b6 100644
--- a/primitives/state-machine/Cargo.toml
+++ b/primitives/state-machine/Cargo.toml
@@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
 log = { version = "0.4.11", optional = true }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 parking_lot = { version = "0.11.1", optional = true }
 hash-db = { version = "0.15.2", default-features = false }
 trie-db = { version = "0.22.6", default-features = false }
diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml
index 552a3cb5e8d63..609ecd0d31921 100644
--- a/primitives/timestamp/Cargo.toml
+++ b/primitives/timestamp/Cargo.toml
@@ -18,7 +18,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
 sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" }
 codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 log = { version = "0.4.8", optional = true }
 futures-timer = { version = "3.0.2", optional = true }
 async-trait = { version = "0.1.50", optional = true }
diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml
index af44aed6c5b21..bbdaf9c1cab1b 100644
--- a/primitives/version/Cargo.toml
+++ b/primitives/version/Cargo.toml
@@ -23,7 +23,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" }
 sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runtime" }
 sp-version-proc-macro = { version = "4.0.0-dev", default-features = false, path = "proc-macro" }
 parity-wasm = { version = "0.42.2", optional = true }
-thiserror = { version = "1.0.21", optional = true }
+thiserror = { version = "1.0.30", optional = true }
 
 [features]
 default = ["std"]

From 5d22361ebbd9b5ccafd87f3f50d2646ad59b6c0b Mon Sep 17 00:00:00 2001
From: Doordashcon <90750465+Doordashcon@users.noreply.github.com>
Date: Fri, 12 Nov 2021 00:48:37 +0100
Subject: [PATCH 087/162] tuple to struct event variants (#10206)

* update sudo pallet

* Update mock.rs

* cargo +nightly fmt

* frame-support remote-externalities

* AFNPEV tips

* AFNPEV bin & update sudo

* cargo +nightly fmt

* optional dependency remote-test feature

* fmt

Co-authored-by: Shawn Tabrizi 
---
 bin/node/executor/tests/fees.rs             |  9 ++++--
 frame/sudo/src/lib.rs                       | 16 +++++----
 frame/sudo/src/mock.rs                      |  8 ++---
 frame/sudo/src/tests.rs                     | 10 +++---
 frame/tips/src/lib.rs                       | 36 ++++++++++++---------
 frame/tips/src/tests.rs                     | 10 +++---
 utils/frame/remote-externalities/Cargo.toml |  3 +-
 7 files changed, 51 insertions(+), 41 deletions(-)

diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs
index 379cdda5b76a3..4c7593bdb3ab2 100644
--- a/bin/node/executor/tests/fees.rs
+++ b/bin/node/executor/tests/fees.rs
@@ -241,7 +241,10 @@ fn block_weight_capacity_report() {
 		let mut xts = (0..num_transfers)
 			.map(|i| CheckedExtrinsic {
 				signed: Some((charlie(), signed_extra(nonce + i as Index, 0))),
-				function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)),
+				function: Call::Balances(pallet_balances::Call::transfer {
+					dest: bob().into(),
+					value: 0,
+				}),
 			})
 			.collect::<Vec<_>>();
 
@@ -249,7 +252,7 @@ fn block_weight_capacity_report() {
 			0,
 			CheckedExtrinsic {
 				signed: None,
-				function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)),
+				function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }),
 			},
 		);
 
@@ -319,7 +322,7 @@ fn block_length_capacity_report() {
 			vec![
 				CheckedExtrinsic {
 					signed: None,
-					function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)),
+					function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }),
 				},
 				CheckedExtrinsic {
 					signed: Some((charlie(), signed_extra(nonce, 0))),
diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs
index 427455849bb00..5f8e6fc0cc13a 100644
--- a/frame/sudo/src/lib.rs
+++ b/frame/sudo/src/lib.rs
@@ -150,7 +150,7 @@ pub mod pallet {
 			ensure!(sender == Self::key(), Error::<T>::RequireSudo);
 
 			let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into());
-			Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error)));
+			Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) });
 			// Sudo user does not pay a fee.
 			Ok(Pays::No.into())
 		}
@@ -176,7 +176,7 @@ pub mod pallet {
 			ensure!(sender == Self::key(), Error::<T>::RequireSudo);
 
 			let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into());
-			Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error)));
+			Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) });
 			// Sudo user does not pay a fee.
 			Ok(Pays::No.into())
 		}
@@ -201,7 +201,7 @@ pub mod pallet {
 			ensure!(sender == Self::key(), Error::<T>::RequireSudo);
 			let new = T::Lookup::lookup(new)?;
 
-			Self::deposit_event(Event::KeyChanged(Self::key()));
+			Self::deposit_event(Event::KeyChanged { new_sudoer: Self::key() });
 			<Key<T>>::put(new);
 			// Sudo user does not pay a fee.
 			Ok(Pays::No.into())
@@ -241,7 +241,9 @@ pub mod pallet {
 
 			let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into());
 
-			Self::deposit_event(Event::SudoAsDone(res.map(|_| ()).map_err(|e| e.error)));
+			Self::deposit_event(Event::SudoAsDone {
+				sudo_result: res.map(|_| ()).map_err(|e| e.error),
+			});
 			// Sudo user does not pay a fee.
 			Ok(Pays::No.into())
 		}
@@ -251,11 +253,11 @@ pub mod pallet {
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
 	pub enum Event<T: Config> {
 		/// A sudo just took place. \[result\]
-		Sudid(DispatchResult),
+		Sudid { sudo_result: DispatchResult },
 		/// The \[sudoer\] just switched identity; the old key is supplied.
-		KeyChanged(T::AccountId),
+		KeyChanged { new_sudoer: T::AccountId },
 		/// A sudo just took place. \[result\]
-		SudoAsDone(DispatchResult),
+		SudoAsDone { sudo_result: DispatchResult },
 	}
 
 	#[pallet::error]
diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs
index dad17384d5603..bfbed0d38ab34 100644
--- a/frame/sudo/src/mock.rs
+++ b/frame/sudo/src/mock.rs
@@ -58,7 +58,7 @@ pub mod logger {
 			// Ensure that the `origin` is `Root`.
 			ensure_root(origin)?;
 			<I32Log<T>>::append(i);
-			Self::deposit_event(Event::AppendI32(i, weight));
+			Self::deposit_event(Event::AppendI32 { value: i, weight });
 			Ok(().into())
 		}
 
@@ -72,7 +72,7 @@ pub mod logger {
 			let sender = ensure_signed(origin)?;
 			<I32Log<T>>::append(i);
 			<AccountLog<T>>::append(sender.clone());
-			Self::deposit_event(Event::AppendI32AndAccount(sender, i, weight));
+			Self::deposit_event(Event::AppendI32AndAccount { sender, value: i, weight });
 			Ok(().into())
 		}
 	}
@@ -80,8 +80,8 @@ pub mod logger {
 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
 	pub enum Event<T: Config> {
-		AppendI32(i32, Weight),
-		AppendI32AndAccount(T::AccountId, i32, Weight),
+		AppendI32 { value: i32, weight: Weight },
+		AppendI32AndAccount { sender: T::AccountId, value: i32, weight: Weight },
 	}
 
 	#[pallet::storage]
diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs
index 2eb558e9471c4..3fd199a1c8ca3 100644
--- a/frame/sudo/src/tests.rs
+++ b/frame/sudo/src/tests.rs
@@ -58,7 +58,7 @@ fn sudo_emits_events_correctly() {
 		// Should emit event to indicate success when called with the root `key` and `call` is `Ok`.
 		let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 }));
 		assert_ok!(Sudo::sudo(Origin::signed(1), call));
-		System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(()))));
+		System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) }));
 	})
 }
 
@@ -96,7 +96,7 @@ fn sudo_unchecked_weight_emits_events_correctly() {
 		// Should emit event to indicate success when called with the root `key` and `call` is `Ok`.
 		let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 }));
 		assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000));
-		System::assert_has_event(TestEvent::Sudo(Event::Sudid(Ok(()))));
+		System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) }));
 	})
 }
 
@@ -123,10 +123,10 @@ fn set_key_emits_events_correctly() {
 
 		// A root `key` can change the root `key`.
 		assert_ok!(Sudo::set_key(Origin::signed(1), 2));
-		System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(1)));
+		System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { new_sudoer: 1 }));
 		// Double check.
 		assert_ok!(Sudo::set_key(Origin::signed(2), 4));
-		System::assert_has_event(TestEvent::Sudo(Event::KeyChanged(2)));
+		System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { new_sudoer: 2 }));
 	});
 }
 
@@ -161,6 +161,6 @@ fn sudo_as_emits_events_correctly() {
 		// A non-privileged function will work when passed to `sudo_as` with the root `key`.
 		let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 }));
 		assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call));
-		System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone(Ok(()))));
+		System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) }));
 	});
 }
diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs
index f4a4edb7b3999..5aad9af346603 100644
--- a/frame/tips/src/lib.rs
+++ b/frame/tips/src/lib.rs
@@ -179,16 +179,16 @@ pub mod pallet {
 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
 	pub enum Event<T: Config> {
-		/// A new tip suggestion has been opened. \[tip_hash\]
-		NewTip(T::Hash),
-		/// A tip suggestion has reached threshold and is closing. \[tip_hash\]
-		TipClosing(T::Hash),
-		/// A tip suggestion has been closed. \[tip_hash, who, payout\]
-		TipClosed(T::Hash, T::AccountId, BalanceOf),
-		/// A tip suggestion has been retracted. \[tip_hash\]
-		TipRetracted(T::Hash),
-		/// A tip suggestion has been slashed. \[tip_hash, finder, deposit\]
-		TipSlashed(T::Hash, T::AccountId, BalanceOf),
+		/// A new tip suggestion has been opened.
+		NewTip { tip_hash: T::Hash },
+		/// A tip suggestion has reached threshold and is closing.
+		TipClosing { tip_hash: T::Hash },
+		/// A tip suggestion has been closed.
+		TipClosed { tip_hash: T::Hash, who: T::AccountId, payout: BalanceOf<T> },
+		/// A tip suggestion has been retracted.
+		TipRetracted { tip_hash: T::Hash },
+		/// A tip suggestion has been slashed.
+		TipSlashed { tip_hash: T::Hash, finder: T::AccountId, deposit: BalanceOf<T> },
 	}
 
 	/// Old name generated by `decl_event`.
@@ -265,7 +265,7 @@ pub mod pallet {
 				finders_fee: true,
 			};
 			Tips::<T>::insert(&hash, tip);
-			Self::deposit_event(Event::NewTip(hash));
+			Self::deposit_event(Event::NewTip { tip_hash: hash });
 			Ok(())
 		}
 
@@ -300,7 +300,7 @@ pub mod pallet {
 				let err_amount = T::Currency::unreserve(&who, tip.deposit);
 				debug_assert!(err_amount.is_zero());
 			}
-			Self::deposit_event(Event::TipRetracted(hash));
+			Self::deposit_event(Event::TipRetracted { tip_hash: hash });
 			Ok(())
 		}
 
@@ -340,7 +340,7 @@ pub mod pallet {
 			let hash = T::Hashing::hash_of(&(&reason_hash, &who));
 
 			Reasons::<T>::insert(&reason_hash, &reason);
-			Self::deposit_event(Event::NewTip(hash.clone()));
+			Self::deposit_event(Event::NewTip { tip_hash: hash.clone() });
 			let tips = vec![(tipper.clone(), tip_value)];
 			let tip = OpenTip {
 				reason: reason_hash,
@@ -390,7 +390,7 @@ pub mod pallet {
 
 			let mut tip = Tips::<T>::get(hash).ok_or(Error::<T>::UnknownTip)?;
 			if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) {
-				Self::deposit_event(Event::TipClosing(hash.clone()));
+				Self::deposit_event(Event::TipClosing { tip_hash: hash.clone() });
 			}
 			Tips::<T>::insert(&hash, tip);
 			Ok(())
@@ -449,7 +449,11 @@ pub mod pallet {
 				T::OnSlash::on_unbalanced(imbalance);
 			}
 			Reasons::<T>::remove(&tip.reason);
-			Self::deposit_event(Event::TipSlashed(hash, tip.finder, tip.deposit));
+			Self::deposit_event(Event::TipSlashed {
+				tip_hash: hash,
+				finder: tip.finder,
+				deposit: tip.deposit,
+			});
 			Ok(())
 		}
 	}
@@ -544,7 +548,7 @@ impl<T: Config> Pallet<T> {
 		// same as above: best-effort only.
 		let res = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive);
 		debug_assert!(res.is_ok());
-		Self::deposit_event(Event::TipClosed(hash, tip.who, payout));
+		Self::deposit_event(Event::TipClosed { tip_hash: hash, who: tip.who, payout });
 	}
 
 	pub fn migrate_retract_tip_for_tip_new(module: &[u8], item: &[u8]) {
diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs
index 7ea80d78c5532..2aac22ffe18ca 100644
--- a/frame/tips/src/tests.rs
+++ b/frame/tips/src/tests.rs
@@ -267,7 +267,7 @@ fn close_tip_works() {
 
 		let h = tip_hash();
 
-		assert_eq!(last_event(), TipEvent::NewTip(h));
+		assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h });
 
 		assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10));
 
@@ -275,7 +275,7 @@ fn close_tip_works() {
 
 		assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10));
 
-		assert_eq!(last_event(), TipEvent::TipClosing(h));
+		assert_eq!(last_event(), TipEvent::TipClosing { tip_hash: h });
 
 		assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::<Test>::Premature);
 
@@ -284,7 +284,7 @@ fn close_tip_works() {
 		assert_ok!(Tips::close_tip(Origin::signed(0), h.into()));
 		assert_eq!(Balances::free_balance(3), 10);
 
-		assert_eq!(last_event(), TipEvent::TipClosed(h, 3, 10));
+		assert_eq!(last_event(), TipEvent::TipClosed { tip_hash: h, who: 3, payout: 10 });
 
 		assert_noop!(Tips::close_tip(Origin::signed(100), h.into()), Error::<Test>::UnknownTip);
 	});
@@ -306,14 +306,14 @@ fn slash_tip_works() {
 		assert_eq!(Balances::free_balance(0), 88);
 
 		let h = tip_hash();
-		assert_eq!(last_event(), TipEvent::NewTip(h));
+		assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h });
 
 		// can't remove from any origin
 		assert_noop!(Tips::slash_tip(Origin::signed(0), h.clone()), BadOrigin);
 
 		// can remove from root.
 		assert_ok!(Tips::slash_tip(Origin::root(), h.clone()));
-		assert_eq!(last_event(), TipEvent::TipSlashed(h, 0, 12));
+		assert_eq!(last_event(), TipEvent::TipSlashed { tip_hash: h, finder: 0, deposit: 12 });
 
 		// tipper slashed
 		assert_eq!(Balances::reserved_balance(0), 0);
diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml
index f2482f9c423db..56f797343c0f4 100644
--- a/utils/frame/remote-externalities/Cargo.toml
+++ b/utils/frame/remote-externalities/Cargo.toml
@@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
 jsonrpsee = { version = "0.4.1", features = ["ws-client", "macros"] }
 
 env_logger = "0.9"
+frame-support = { path = "../../../frame/support", optional = true }
 log = "0.4.11"
 codec = { package = "parity-scale-codec", version = "2.0.0" }
 serde_json = "1.0"
@@ -32,4 +33,4 @@ pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", versio
 frame-support = { path = "../../../frame/support", version = "4.0.0-dev" }
 
 [features]
-remote-test = []
+remote-test = ["frame-support"]
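
The pattern applied throughout this patch: tuple variants whose fields were
described only in `\[...\]` doc annotations become struct variants, so the
field names travel with the event and its metadata. From a consumer's point
of view, matching becomes self-describing; a sketch, where `Runtime` and
`handle_sudo_event` are hypothetical and not part of this patch:

    fn handle_sudo_event(event: pallet_sudo::Event<Runtime>) {
        match event {
            // Previously `Event::Sudid(result)`, with the meaning of the
            // positional field carried only by the doc comment.
            pallet_sudo::Event::Sudid { sudo_result } =>
                log::info!("sudo dispatch returned {:?}", sudo_result),
            _ => {},
        }
    }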

From c45c1cbfde0037e6ef650146789a0aa0190fdefb Mon Sep 17 00:00:00 2001
From: Arkadiy Paronyan 
Date: Fri, 12 Nov 2021 14:15:01 +0100
Subject: [PATCH 088/162] Kill the light client, CHTs and change tries.
 (#10080)

* Remove light client, change tries and CHTs

* Update tests

* fmt

* Restore changes_root

* Fixed benches

* Cargo fmt

* fmt

* fmt
---
 Cargo.lock                                    |   20 -
 Cargo.toml                                    |    1 -
 bin/node-template/node/src/chain_spec.rs      |    1 -
 bin/node-template/node/src/service.rs         |    3 -
 bin/node/cli/src/chain_spec.rs                |    5 +-
 bin/node/cli/src/service.rs                   |    7 +-
 bin/node/executor/benches/bench.rs            |    4 +-
 bin/node/executor/tests/basic.rs              |   72 +-
 bin/node/executor/tests/common.rs             |   11 +-
 bin/node/executor/tests/fees.rs               |   14 +-
 bin/node/executor/tests/submit_transaction.rs |   12 +-
 bin/node/primitives/src/lib.rs                |    2 +-
 bin/node/rpc/src/lib.rs                       |   35 -
 bin/node/testing/src/bench.rs                 |    1 -
 bin/node/testing/src/client.rs                |    6 +-
 bin/node/testing/src/genesis.rs               |   16 +-
 client/api/src/backend.rs                     |  109 +-
 client/api/src/cht.rs                         |  474 ----
 client/api/src/in_mem.rs                      |  123 +-
 client/api/src/lib.rs                         |    3 -
 client/api/src/light.rs                       |  372 ---
 client/api/src/proof_provider.rs              |   25 +-
 .../basic-authorship/src/basic_authorship.rs  |   14 +-
 client/block-builder/src/lib.rs               |   15 +-
 client/consensus/aura/src/import_queue.rs     |   18 +-
 client/consensus/aura/src/lib.rs              |   78 +-
 client/consensus/babe/src/lib.rs              |   41 +-
 client/consensus/babe/src/tests.rs            |   22 +-
 client/consensus/babe/src/verification.rs     |   13 +-
 client/consensus/common/src/block_import.rs   |    8 +-
 client/consensus/manual-seal/src/consensus.rs |    8 +-
 .../manual-seal/src/consensus/babe.rs         |   17 +-
 client/consensus/pow/src/lib.rs               |   18 +-
 client/consensus/slots/src/lib.rs             |   10 +-
 client/db/src/cache/list_cache.rs             | 2351 -----------------
 client/db/src/cache/list_entry.rs             |  187 --
 client/db/src/cache/list_storage.rs           |  441 ----
 client/db/src/cache/mod.rs                    |  413 ---
 client/db/src/changes_tries_storage.rs        | 1168 --------
 client/db/src/lib.rs                          |  273 +-
 client/db/src/light.rs                        | 1329 ----------
 client/db/src/parity_db.rs                    |    6 +-
 client/db/src/utils.rs                        |   79 +-
 client/executor/src/integration_tests/mod.rs  |    2 +-
 client/finality-grandpa/src/import.rs         |    7 +-
 client/finality-grandpa/src/lib.rs            |    5 +-
 client/finality-grandpa/src/tests.rs          |   47 +-
 client/light/Cargo.toml                       |   27 -
 client/light/README.md                        |    3 -
 client/light/src/backend.rs                   |  578 ----
 client/light/src/blockchain.rs                |  219 --
 client/light/src/call_executor.rs             |  206 --
 client/light/src/lib.rs                       |   41 -
 client/network/src/behaviour.rs               |   50 +-
 client/network/src/config.rs                  |    6 -
 client/network/src/lib.rs                     |    1 -
 client/network/src/light_client_requests.rs   |  268 --
 .../src/light_client_requests/handler.rs      |  119 +-
 .../src/light_client_requests/sender.rs       | 1294 ---------
 client/network/src/on_demand_layer.rs         |  241 --
 client/network/src/service.rs                 |   37 -
 client/network/src/service/tests.rs           |    1 -
 client/network/test/src/lib.rs                |  185 +-
 client/network/test/src/sync.rs               |  105 +-
 client/rpc/src/chain/chain_light.rs           |  114 -
 client/rpc/src/chain/mod.rs                   |   29 +-
 client/rpc/src/state/state_full.rs            |  125 +-
 client/rpc/src/state/tests.rs                 |   34 +-
 client/service/src/builder.rs                 |   27 +-
 client/service/src/client/call_executor.rs    |   18 +-
 client/service/src/client/client.rs           |  401 +--
 client/service/src/lib.rs                     |    2 +-
 client/service/test/Cargo.toml                |    1 -
 client/service/test/src/client/db.rs          |    1 -
 client/service/test/src/client/mod.rs         |  274 +-
 client/transaction-pool/src/api.rs            |  134 +-
 client/transaction-pool/src/lib.rs            |   32 +-
 frame/aura/src/lib.rs                         |    4 +-
 frame/babe/src/lib.rs                         |    2 +-
 frame/beefy-mmr/src/tests.rs                  |    2 +-
 frame/beefy/src/lib.rs                        |    4 +-
 frame/beefy/src/tests.rs                      |    3 +-
 frame/executive/src/lib.rs                    |    7 +-
 frame/grandpa/src/lib.rs                      |    2 +-
 frame/grandpa/src/mock.rs                     |    2 +-
 frame/support/src/storage/mod.rs              |   17 +-
 frame/system/benchmarking/src/lib.rs          |   21 +-
 frame/system/src/lib.rs                       |   64 +-
 .../api/proc-macro/src/impl_runtime_apis.rs   |    5 -
 .../proc-macro/src/mock_impl_runtime_apis.rs  |    4 -
 primitives/api/src/lib.rs                     |    5 +-
 primitives/api/test/tests/runtime_calls.rs    |    2 +-
 primitives/blockchain/src/backend.rs          |   31 -
 primitives/consensus/aura/src/digests.rs      |    4 +-
 primitives/consensus/babe/src/digests.rs      |    7 +-
 primitives/consensus/common/src/lib.rs        |    8 +-
 primitives/core/src/changes_trie.rs           |  321 ---
 primitives/core/src/lib.rs                    |    2 -
 primitives/externalities/src/lib.rs           |    7 -
 primitives/io/src/lib.rs                      |   15 +-
 primitives/runtime-interface/test/src/lib.rs  |    2 +-
 primitives/runtime/src/generic/digest.rs      |  248 +-
 primitives/runtime/src/generic/header.rs      |   26 +-
 primitives/runtime/src/generic/mod.rs         |    2 +-
 primitives/runtime/src/generic/tests.rs       |   15 +-
 primitives/runtime/src/testing.rs             |    4 +-
 primitives/runtime/src/traits.rs              |   16 +-
 primitives/state-machine/src/backend.rs       |   26 -
 primitives/state-machine/src/basic.rs         |    4 -
 .../state-machine/src/changes_trie/build.rs   | 1083 --------
 .../src/changes_trie/build_cache.rs           |  278 --
 .../src/changes_trie/build_iterator.rs        |  487 ----
 .../src/changes_trie/changes_iterator.rs      |  748 ------
 .../state-machine/src/changes_trie/input.rs   |  207 --
 .../state-machine/src/changes_trie/mod.rs     |  428 ---
 .../state-machine/src/changes_trie/prune.rs   |  204 --
 .../state-machine/src/changes_trie/storage.rs |  214 --
 .../src/changes_trie/surface_iterator.rs      |  326 ---
 primitives/state-machine/src/ext.rs           |  195 +-
 primitives/state-machine/src/lib.rs           |  210 +-
 .../src/overlayed_changes/changeset.rs        |    1 -
 .../src/overlayed_changes/mod.rs              |  146 +-
 primitives/state-machine/src/read_only.rs     |    4 -
 primitives/state-machine/src/testing.rs       |   73 +-
 primitives/storage/src/lib.rs                 |    3 -
 primitives/tasks/src/async_externalities.rs   |    4 -
 primitives/test-primitives/src/lib.rs         |    9 +-
 test-utils/client/Cargo.toml                  |    1 -
 test-utils/client/src/lib.rs                  |   10 +-
 test-utils/runtime/client/Cargo.toml          |    1 -
 .../runtime/client/src/block_builder_ext.rs   |   13 -
 test-utils/runtime/client/src/lib.rs          |  169 +-
 test-utils/runtime/src/genesismap.rs          |    7 -
 test-utils/runtime/src/lib.rs                 |   13 +-
 test-utils/runtime/src/system.rs              |   36 +-
 test-utils/test-runner/src/client.rs          |    3 -
 test-utils/test-runner/src/node.rs            |   16 +-
 utils/frame/benchmarking-cli/src/command.rs   |   14 +-
 utils/frame/rpc/system/src/lib.rs             |   89 +-
 .../cli/src/commands/follow_chain.rs          |    7 +-
 utils/frame/try-runtime/cli/src/lib.rs        |    3 +-
 141 files changed, 534 insertions(+), 17809 deletions(-)
 delete mode 100644 client/api/src/cht.rs
 delete mode 100644 client/api/src/light.rs
 delete mode 100644 client/db/src/cache/list_cache.rs
 delete mode 100644 client/db/src/cache/list_entry.rs
 delete mode 100644 client/db/src/cache/list_storage.rs
 delete mode 100644 client/db/src/cache/mod.rs
 delete mode 100644 client/db/src/changes_tries_storage.rs
 delete mode 100644 client/db/src/light.rs
 delete mode 100644 client/light/Cargo.toml
 delete mode 100644 client/light/README.md
 delete mode 100644 client/light/src/backend.rs
 delete mode 100644 client/light/src/blockchain.rs
 delete mode 100644 client/light/src/call_executor.rs
 delete mode 100644 client/light/src/lib.rs
 delete mode 100644 client/network/src/light_client_requests/sender.rs
 delete mode 100644 client/network/src/on_demand_layer.rs
 delete mode 100644 client/rpc/src/chain/chain_light.rs
 delete mode 100644 primitives/core/src/changes_trie.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/build.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/build_cache.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/build_iterator.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/changes_iterator.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/input.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/mod.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/prune.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/storage.rs
 delete mode 100644 primitives/state-machine/src/changes_trie/surface_iterator.rs

diff --git a/Cargo.lock b/Cargo.lock
index 5d2fa99ca55a5..74504d5f1547b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8209,23 +8209,6 @@ dependencies = [
  "tempfile",
 ]
 
-[[package]]
-name = "sc-light"
-version = "4.0.0-dev"
-dependencies = [
- "hash-db",
- "parity-scale-codec",
- "parking_lot 0.11.1",
- "sc-client-api",
- "sc-executor",
- "sp-api",
- "sp-blockchain",
- "sp-core",
- "sp-externalities",
- "sp-runtime",
- "sp-state-machine",
-]
-
 [[package]]
 name = "sc-network"
 version = "0.10.0-dev"
@@ -8558,7 +8541,6 @@ dependencies = [
  "sc-client-db",
  "sc-consensus",
  "sc-executor",
- "sc-light",
  "sc-network",
  "sc-service",
  "sc-transaction-pool-api",
@@ -10176,7 +10158,6 @@ dependencies = [
  "sc-client-db",
  "sc-consensus",
  "sc-executor",
- "sc-light",
  "sc-offchain",
  "sc-service",
  "serde",
@@ -10245,7 +10226,6 @@ dependencies = [
  "sc-block-builder",
  "sc-client-api",
  "sc-consensus",
- "sc-light",
  "sp-api",
  "sp-blockchain",
  "sp-consensus",
diff --git a/Cargo.toml b/Cargo.toml
index ca60af692497d..e03f33a4d27d5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -42,7 +42,6 @@ members = [
 	"client/finality-grandpa",
 	"client/informant",
 	"client/keystore",
-	"client/light",
 	"client/network",
 	"client/network-gossip",
 	"client/network/test",
diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs
index baf5e5d41ab85..d32a0dcb29d00 100644
--- a/bin/node-template/node/src/chain_spec.rs
+++ b/bin/node-template/node/src/chain_spec.rs
@@ -134,7 +134,6 @@ fn testnet_genesis(
 		system: SystemConfig {
 			// Add Wasm runtime to storage.
 			code: wasm_binary.to_vec(),
-			changes_trie_config: Default::default(),
 		},
 		balances: BalancesConfig {
 			// Configure endowed accounts with initial balance of 1 << 60.
diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs
index c71336c330882..82b1c5625373c 100644
--- a/bin/node-template/node/src/service.rs
+++ b/bin/node-template/node/src/service.rs
@@ -194,7 +194,6 @@ pub fn new_full(mut config: Configuration) -> Result
 			transaction_pool: transaction_pool.clone(),
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
-			on_demand: None,
 			block_announce_validator_builder: None,
 			warp_sync: Some(warp_sync),
 		})?;
@@ -234,8 +233,6 @@ pub fn new_full(mut config: Configuration) -> Result
 		task_manager: &mut task_manager,
 		transaction_pool: transaction_pool.clone(),
 		rpc_extensions_builder,
-		on_demand: None,
-		remote_blockchain: None,
 		backend,
 		system_rpc_tx,
 		config,
diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs
index 7b1ed90017c36..6d11722081e30 100644
--- a/bin/node/cli/src/chain_spec.rs
+++ b/bin/node/cli/src/chain_spec.rs
@@ -295,10 +295,7 @@ pub fn testnet_genesis(
 	const STASH: Balance = ENDOWMENT / 1000;
 
 	GenesisConfig {
-		system: SystemConfig {
-			code: wasm_binary_unwrap().to_vec(),
-			changes_trie_config: Default::default(),
-		},
+		system: SystemConfig { code: wasm_binary_unwrap().to_vec() },
 		balances: BalancesConfig {
 			balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(),
 		},
diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs
index dee546bf07001..e73b69153d1df 100644
--- a/bin/node/cli/src/service.rs
+++ b/bin/node/cli/src/service.rs
@@ -338,7 +338,6 @@ pub fn new_full_base(
 			transaction_pool: transaction_pool.clone(),
 			spawn_handle: task_manager.spawn_handle(),
 			import_queue,
-			on_demand: None,
 			block_announce_validator_builder: None,
 			warp_sync: Some(warp_sync),
 		})?;
@@ -369,8 +368,6 @@ pub fn new_full_base(
 		rpc_extensions_builder: Box::new(rpc_extensions_builder),
 		transaction_pool: transaction_pool.clone(),
 		task_manager: &mut task_manager,
-		on_demand: None,
-		remote_blockchain: None,
 		system_rpc_tx,
 		telemetry: telemetry.as_mut(),
 	})?;
@@ -542,7 +539,7 @@ mod tests {
 	use sc_service_test::TestNetNode;
 	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool};
 	use sp_consensus::{BlockOrigin, Environment, Proposer};
-	use sp_core::{crypto::Pair as CryptoPair, Public, H256};
+	use sp_core::{crypto::Pair as CryptoPair, Public};
 	use sp_inherents::InherentDataProvider;
 	use sp_keyring::AccountKeyring;
 	use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
@@ -621,7 +618,7 @@ mod tests {
 					None,
 				);
 
-				let mut digest = Digest::::default();
+				let mut digest = Digest::default();
 
 				// even though there's only one authority some slots might be empty,
 				// so we must keep trying the next slots until we can claim one.
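
The `Digest` rework this hunk depends on removes the hash-type parameter, so the old turbofish form no longer compiles. A minimal sketch of building a digest after this series, assuming `sp_runtime` from this tree; the function itself is illustrative only:

```rust
use sp_runtime::{Digest, DigestItem};

/// `Digest` is now a plain struct over `DigestItem` logs, so it is
/// constructed without any `Digest::<H256>::default()` turbofish.
fn example_digest() -> Digest {
	let mut digest = Digest::default();
	digest.push(DigestItem::Other(b"example".to_vec()));
	digest
}
```
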
diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs
index 1a39c9decb321..03c3eb53e23f9 100644
--- a/bin/node/executor/benches/bench.rs
+++ b/bin/node/executor/benches/bench.rs
@@ -53,7 +53,7 @@ const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version;
 
 const HEAP_PAGES: u64 = 20;
 
-type TestExternalities = CoreTestExternalities;
+type TestExternalities = CoreTestExternalities;
 
 #[derive(Debug)]
 enum ExecutionMethod {
@@ -188,7 +188,7 @@ fn bench_execute_block(c: &mut Criterion) {
 
 	for strategy in execution_methods {
 		group.bench_function(format!("{:?}", strategy), |b| {
-			let genesis_config = node_testing::genesis::config(false, Some(compact_code_unwrap()));
+			let genesis_config = node_testing::genesis::config(Some(compact_code_unwrap()));
 			let (use_native, wasm_method) = match strategy {
 				ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted),
 				ExecutionMethod::Wasm(wasm_method) => (false, wasm_method),
diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs
index bbb9339189b06..b7df7bf0bd41a 100644
--- a/bin/node/executor/tests/basic.rs
+++ b/bin/node/executor/tests/basic.rs
@@ -29,7 +29,7 @@ use sp_runtime::{
 use node_primitives::{Balance, Hash};
 use node_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
-	Balances, Block, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment,
+	Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment,
 	UncheckedExtrinsic,
 };
 use node_testing::keyring::*;
@@ -78,7 +78,7 @@ fn set_heap_pages(ext: &mut E, heap_pages: u64) {
 fn changes_trie_block() -> (Vec, Hash) {
 	let time = 42 * 1000;
 	construct_block(
-		&mut new_test_ext(compact_code_unwrap(), true),
+		&mut new_test_ext(compact_code_unwrap()),
 		1,
 		GENESIS_HASH.into(),
 		vec![
@@ -102,7 +102,7 @@ fn changes_trie_block() -> (Vec, Hash) {
 /// are not guaranteed to be deterministic) and to ensure that the correct state is propagated
 /// from block1's execution to block2 to derive the correct storage_root.
 fn blocks() -> ((Vec, Hash), (Vec, Hash)) {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let time1 = 42 * 1000;
 	let block1 = construct_block(
 		&mut t,
@@ -160,7 +160,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) {
 
 fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) {
 	construct_block(
-		&mut new_test_ext(compact_code_unwrap(), false),
+		&mut new_test_ext(compact_code_unwrap()),
 		1,
 		GENESIS_HASH.into(),
 		vec![
@@ -179,7 +179,7 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) {
 
 #[test]
 fn panic_execution_with_foreign_code_gives_error() {
-	let mut t = new_test_ext(bloaty_code_unwrap(), false);
+	let mut t = new_test_ext(bloaty_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		(69u128, 0u32, 0u128, 0u128, 0u128).encode(),
@@ -211,7 +211,7 @@ fn panic_execution_with_foreign_code_gives_error() {
 
 #[test]
 fn bad_extrinsic_with_native_equivalent_code_gives_error() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		(0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode(),
@@ -243,7 +243,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() {
 
 #[test]
 fn successful_execution_with_native_equivalent_code_gives_ok() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		AccountInfo::<::Index, _> {
@@ -296,7 +296,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() {
 
 #[test]
 fn successful_execution_with_foreign_code_gives_ok() {
-	let mut t = new_test_ext(bloaty_code_unwrap(), false);
+	let mut t = new_test_ext(bloaty_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		AccountInfo::<::Index, _> {
@@ -349,7 +349,7 @@ fn successful_execution_with_foreign_code_gives_ok() {
 
 #[test]
 fn full_native_block_import_works() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	let (block1, block2) = blocks();
 
@@ -529,7 +529,7 @@ fn full_native_block_import_works() {
 
 #[test]
 fn full_wasm_block_import_works() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	let (block1, block2) = blocks();
 
@@ -679,7 +679,7 @@ fn deploying_wasm_contract_should_work() {
 
 	let time = 42 * 1000;
 	let b = construct_block(
-		&mut new_test_ext(compact_code_unwrap(), false),
+		&mut new_test_ext(compact_code_unwrap()),
 		1,
 		GENESIS_HASH.into(),
 		vec![
@@ -712,7 +712,7 @@ fn deploying_wasm_contract_should_work() {
 		(time / SLOT_DURATION).into(),
 	);
 
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None)
 		.0
@@ -727,7 +727,7 @@ fn deploying_wasm_contract_should_work() {
 
 #[test]
 fn wasm_big_block_import_fails() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	set_heap_pages(&mut t.ext(), 4);
 
@@ -744,7 +744,7 @@ fn wasm_big_block_import_fails() {
 
 #[test]
 fn native_big_block_import_succeeds() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	executor_call:: _>(
 		&mut t,
@@ -759,7 +759,7 @@ fn native_big_block_import_succeeds() {
 
 #[test]
 fn native_big_block_import_fails_on_fallback() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	// We set the heap pages to 8 because we know that should give an OOM in WASM with the given
 	// block.
@@ -778,7 +778,7 @@ fn native_big_block_import_fails_on_fallback() {
 
 #[test]
 fn panic_execution_gives_error() {
-	let mut t = new_test_ext(bloaty_code_unwrap(), false);
+	let mut t = new_test_ext(bloaty_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		AccountInfo::<::Index, _> {
@@ -815,7 +815,7 @@ fn panic_execution_gives_error() {
 
 #[test]
 fn successful_execution_gives_ok() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	t.insert(
 		>::hashed_key_for(alice()),
 		AccountInfo::<::Index, _> {
@@ -874,44 +874,6 @@ fn successful_execution_gives_ok() {
 	});
 }
 
-#[test]
-fn full_native_block_import_works_with_changes_trie() {
-	let block1 = changes_trie_block();
-	let block_data = block1.0;
-	let block = Block::decode(&mut &block_data[..]).unwrap();
-
-	let mut t = new_test_ext(compact_code_unwrap(), true);
-	executor_call:: _>(
-		&mut t,
-		"Core_execute_block",
-		&block.encode(),
-		true,
-		None,
-	)
-	.0
-	.unwrap();
-
-	assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some());
-}
-
-#[test]
-fn full_wasm_block_import_works_with_changes_trie() {
-	let block1 = changes_trie_block();
-
-	let mut t = new_test_ext(compact_code_unwrap(), true);
-	executor_call:: _>(
-		&mut t,
-		"Core_execute_block",
-		&block1.0,
-		false,
-		None,
-	)
-	.0
-	.unwrap();
-
-	assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some());
-}
-
 #[test]
 fn should_import_block_with_test_client() {
 	use node_testing::client::{
diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs
index d1c24c83c836d..c81d6b7c14e02 100644
--- a/bin/node/executor/tests/common.rs
+++ b/bin/node/executor/tests/common.rs
@@ -81,7 +81,7 @@ pub const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version;
 
 pub const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version;
 
-pub type TestExternalities = CoreTestExternalities;
+pub type TestExternalities = CoreTestExternalities;
 
 pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic {
 	node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH)
@@ -123,14 +123,11 @@ pub fn executor_call<
 	executor().call::(&mut t, &runtime_code, method, data, use_native, native_call)
 }
 
-pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities {
-	let mut ext = TestExternalities::new_with_code(
+pub fn new_test_ext(code: &[u8]) -> TestExternalities {
+	let ext = TestExternalities::new_with_code(
 		code,
-		node_testing::genesis::config(support_changes_trie, Some(code))
-			.build_storage()
-			.unwrap(),
+		node_testing::genesis::config(Some(code)).build_storage().unwrap(),
 	);
-	ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default());
 	ext
 }
 
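For test code outside this crate the migration is mechanical: the boolean changes-trie flag is deleted from every `new_test_ext` call. A sketch of the new call shape, mirroring the callers updated below; the test name is illustrative:

```rust
#[test]
fn new_test_ext_takes_only_the_runtime_code() {
	// Before this patch: new_test_ext(compact_code_unwrap(), false)
	let mut t = new_test_ext(compact_code_unwrap());
	t.execute_with(|| {
		// assertions against the genesis state go here
	});
}
```
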
diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs
index 4c7593bdb3ab2..4767c1bda1fcd 100644
--- a/bin/node/executor/tests/fees.rs
+++ b/bin/node/executor/tests/fees.rs
@@ -36,7 +36,7 @@ use self::common::{sign, *};
 
 #[test]
 fn fee_multiplier_increases_and_decreases_on_big_weight() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 
 	// initial fee multiplier must be one.
 	let mut prev_multiplier = Multiplier::one();
@@ -45,7 +45,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() {
 		assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier);
 	});
 
-	let mut tt = new_test_ext(compact_code_unwrap(), false);
+	let mut tt = new_test_ext(compact_code_unwrap());
 
 	let time1 = 42 * 1000;
 	// big one in terms of weight.
@@ -151,7 +151,7 @@ fn transaction_fee_is_correct() {
 	//   - 1 MILLICENTS in substrate node.
 	//   - 1 milli-dot based on current polkadot runtime.
	// (this is based on assigning 0.1 CENT to the cheapest tx with `weight = 100`)
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	t.insert(>::hashed_key_for(alice()), new_account_info(100));
 	t.insert(>::hashed_key_for(bob()), new_account_info(10));
 	t.insert(
@@ -226,9 +226,9 @@ fn block_weight_capacity_report() {
 	use node_primitives::Index;
 
 	// execution ext.
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	// setup ext.
-	let mut tt = new_test_ext(compact_code_unwrap(), false);
+	let mut tt = new_test_ext(compact_code_unwrap());
 
 	let factor = 50;
 	let mut time = 10;
@@ -303,9 +303,9 @@ fn block_length_capacity_report() {
 	use node_primitives::Index;
 
 	// execution ext.
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	// setup ext.
-	let mut tt = new_test_ext(compact_code_unwrap(), false);
+	let mut tt = new_test_ext(compact_code_unwrap());
 
 	let factor = 256 * 1024;
 	let mut time = 10;
diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs
index 19ca8e5677c43..f047c6a44a667 100644
--- a/bin/node/executor/tests/submit_transaction.rs
+++ b/bin/node/executor/tests/submit_transaction.rs
@@ -28,7 +28,7 @@ use self::common::*;
 
 #[test]
 fn should_submit_unsigned_transaction() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let (pool, state) = TestTransactionPoolExt::new();
 	t.register_extension(TransactionPoolExt::new(pool));
 
@@ -56,7 +56,7 @@ const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put c
 
 #[test]
 fn should_submit_signed_transaction() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let (pool, state) = TestTransactionPoolExt::new();
 	t.register_extension(TransactionPoolExt::new(pool));
 
@@ -99,7 +99,7 @@ fn should_submit_signed_transaction() {
 
 #[test]
 fn should_submit_signed_twice_from_the_same_account() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let (pool, state) = TestTransactionPoolExt::new();
 	t.register_extension(TransactionPoolExt::new(pool));
 
@@ -156,7 +156,7 @@ fn should_submit_signed_twice_from_the_same_account() {
 
 #[test]
 fn should_submit_signed_twice_from_all_accounts() {
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let (pool, state) = TestTransactionPoolExt::new();
 	t.register_extension(TransactionPoolExt::new(pool));
 
@@ -220,7 +220,7 @@ fn submitted_transaction_should_be_valid() {
 		transaction_validity::{TransactionSource, TransactionTag},
 	};
 
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	let (pool, state) = TestTransactionPoolExt::new();
 	t.register_extension(TransactionPoolExt::new(pool));
 
@@ -249,7 +249,7 @@ fn submitted_transaction_should_be_valid() {
 	// check that transaction is valid, but reset environment storage,
 	// since CreateTransaction increments the nonce
 	let tx0 = state.read().transactions[0].clone();
-	let mut t = new_test_ext(compact_code_unwrap(), false);
+	let mut t = new_test_ext(compact_code_unwrap());
 	t.execute_with(|| {
 		let source = TransactionSource::External;
 		let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap();
diff --git a/bin/node/primitives/src/lib.rs b/bin/node/primitives/src/lib.rs
index dade598c704d2..fc3bd5f5114e3 100644
--- a/bin/node/primitives/src/lib.rs
+++ b/bin/node/primitives/src/lib.rs
@@ -57,7 +57,7 @@ pub type Hash = sp_core::H256;
 pub type Timestamp = u64;
 
 /// Digest item type.
-pub type DigestItem = generic::DigestItem;
+pub type DigestItem = generic::DigestItem;
 /// Header type.
 pub type Header = generic::Header;
 /// Block type.
diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs
index 2f7862d3d2644..51ab191d5e9a5 100644
--- a/bin/node/rpc/src/lib.rs
+++ b/bin/node/rpc/src/lib.rs
@@ -51,18 +51,6 @@ use sp_consensus::SelectChain;
 use sp_consensus_babe::BabeApi;
 use sp_keystore::SyncCryptoStorePtr;
 
-/// Light client extra dependencies.
-pub struct LightDeps {
-	/// The client instance to use.
-	pub client: Arc,
-	/// Transaction pool instance.
-	pub pool: Arc,
-	/// Remote access to the blockchain (async).
-	pub remote_blockchain: Arc>,
-	/// Fetcher instance.
-	pub fetcher: Arc,
-}
-
 /// Extra dependencies for BABE.
 pub struct BabeDeps {
 	/// BABE protocol config.
@@ -183,26 +171,3 @@ where
 	Ok(io)
 }
-
-/// Instantiate all Light RPC extensions.
-pub fn create_light(deps: LightDeps) -> jsonrpc_core::IoHandler
-where
-	C: sp_blockchain::HeaderBackend,
-	C: Send + Sync + 'static,
-	F: sc_client_api::light::Fetcher + 'static,
-	P: TransactionPool + 'static,
-	M: jsonrpc_core::Metadata + Default,
-{
-	use substrate_frame_rpc_system::{LightSystem, SystemApi};
-
-	let LightDeps { client, pool, remote_blockchain, fetcher } = deps;
-	let mut io = jsonrpc_core::IoHandler::default();
-	io.extend_with(SystemApi::::to_delegate(LightSystem::new(
-		client,
-		remote_blockchain,
-		fetcher,
-		pool,
-	)));
-
-	io
-}
diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs
index 5ee1ec998be4d..3240497a9d623 100644
--- a/bin/node/testing/src/bench.rs
+++ b/bin/node/testing/src/bench.rs
@@ -591,7 +591,6 @@ impl BenchKeyring {
 	/// Generate genesis with accounts from this keyring endowed with some balance.
 	pub fn generate_genesis(&self) -> node_runtime::GenesisConfig {
 		crate::genesis::config_endowed(
-			false,
 			Some(node_runtime::wasm_binary_unwrap()),
 			self.collect_account_ids(),
 		)
diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs
index 8bd75834c5496..4852e33de6070 100644
--- a/bin/node/testing/src/client.rs
+++ b/bin/node/testing/src/client.rs
@@ -42,13 +42,11 @@ pub type Transaction = sc_client_api::backend::TransactionFor Storage {
-		crate::genesis::config(self.support_changes_trie, None).build_storage().unwrap()
+		crate::genesis::config(None).build_storage().unwrap()
 	}
 }
diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs
index 80399a6670e86..fa0dd22c9c995 100644
--- a/bin/node/testing/src/genesis.rs
+++ b/bin/node/testing/src/genesis.rs
@@ -24,22 +24,17 @@ use node_runtime::{
 	GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus,
 	StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG,
 };
-use sp_core::ChangesTrieConfiguration;
 use sp_keyring::{Ed25519Keyring, Sr25519Keyring};
 use sp_runtime::Perbill;
 
 /// Create genesis runtime configuration for tests.
-pub fn config(support_changes_trie: bool, code: Option<&[u8]>) -> GenesisConfig {
-	config_endowed(support_changes_trie, code, Default::default())
+pub fn config(code: Option<&[u8]>) -> GenesisConfig {
+	config_endowed(code, Default::default())
 }
 
 /// Create genesis runtime configuration for tests with some extra
 /// endowed accounts.
-pub fn config_endowed(
-	support_changes_trie: bool,
-	code: Option<&[u8]>,
-	extra_endowed: Vec,
-) -> GenesisConfig {
+pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> GenesisConfig {
 	let mut endowed = vec![
 		(alice(), 111 * DOLLARS),
 		(bob(), 100 * DOLLARS),
@@ -53,11 +48,6 @@ pub fn config_endowed(
 
 	GenesisConfig {
 		system: SystemConfig {
-			changes_trie_config: if support_changes_trie {
-				Some(ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2 })
-			} else {
-				None
-			},
 			code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()),
 		},
 		indices: IndicesConfig { indices: vec![] },
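
A usage sketch of the slimmed-down helpers; the extra endowed account and the function name are illustrative, not part of the patch:

```rust
use node_testing::genesis;
use sp_keyring::Sr25519Keyring;

/// With the changes-trie switch gone, callers pass only the optional
/// runtime code (`None` falls back to the embedded Wasm) and, for
/// `config_endowed`, the extra accounts to endow.
fn sample_genesis() -> node_runtime::GenesisConfig {
	let extra = vec![Sr25519Keyring::Ferdie.to_account_id()];
	genesis::config_endowed(None, extra)
}
```
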
diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs
index 9dfe82a57ab3b..a681d75fe9af3 100644
--- a/client/api/src/backend.rs
+++ b/client/api/src/backend.rs
@@ -20,27 +20,22 @@
 use crate::{
 	blockchain::{well_known_cache_keys, Backend as BlockchainBackend},
-	light::RemoteBlockchain,
 	UsageInfo,
 };
 use parking_lot::RwLock;
 use sp_blockchain;
 use sp_consensus::BlockOrigin;
-use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange};
+use sp_core::offchain::OffchainStorage;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{Block as BlockT, HashFor, NumberFor},
 	Justification, Justifications, Storage,
 };
 use sp_state_machine::{
-	ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction,
 	ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection,
 };
-use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey};
-use std::{
-	collections::{HashMap, HashSet},
-	sync::Arc,
-};
+use sp_storage::{ChildInfo, StorageData, StorageKey};
+use std::collections::{HashMap, HashSet};
 
 pub use sp_state_machine::{Backend as StateBackend, KeyValueStates};
 use std::marker::PhantomData;
@@ -191,12 +186,6 @@ pub trait BlockImportOperation {
 		Ok(())
 	}
 
-	/// Inject changes trie data into the database.
-	fn update_changes_trie(
-		&mut self,
-		update: ChangesTrieTransaction, NumberFor>,
-	) -> sp_blockchain::Result<()>;
-
 	/// Insert auxiliary keys.
 	///
 	/// Values are `None` if should be deleted.
@@ -418,28 +407,6 @@ pub trait StorageProvider> {
 		child_info: &ChildInfo,
 		key: &StorageKey,
 	) -> sp_blockchain::Result>;
-
-	/// Get longest range within [first; last] that is possible to use in `key_changes`
-	/// and `key_changes_proof` calls.
-	/// Range could be shortened from the beginning if some changes tries have been pruned.
-	/// Returns Ok(None) if changes tries are not supported.
-	fn max_key_changes_range(
-		&self,
-		first: NumberFor,
-		last: BlockId,
-	) -> sp_blockchain::Result, BlockId)>>;
-
-	/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
-	/// Works only for runtimes that are supporting changes tries.
-	///
-	/// Changes are returned in descending order (i.e. last block comes first).
-	fn key_changes(
-		&self,
-		first: NumberFor,
-		last: BlockId,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-	) -> sp_blockchain::Result, u32)>>;
 }
 
 /// Client backend.
@@ -504,9 +471,6 @@ pub trait Backend: AuxStore + Send + Sync {
 	/// Returns current usage statistics.
 	fn usage_info(&self) -> Option;
 
-	/// Returns reference to changes trie storage.
-	fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>;
-
 	/// Returns a handle to offchain storage.
 	fn offchain_storage(&self) -> Option;
 
@@ -561,72 +525,5 @@ pub trait Backend: AuxStore + Send + Sync {
 	fn get_import_lock(&self) -> &RwLock<()>;
 }
 
-/// Changes trie storage that supports pruning.
-pub trait PrunableStateChangesTrieStorage:
-	StateChangesTrieStorage, NumberFor>
-{
-	/// Get reference to StateChangesTrieStorage.
-	fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>;
-	/// Get configuration at given block.
-	fn configuration_at(
-		&self,
-		at: &BlockId,
-	) -> sp_blockchain::Result, Block::Hash>>;
-	/// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
-	/// It is guaranteed that we have no any changes tries before (and including) this block.
-	/// It is guaranteed that all existing changes tries after this block are not yet pruned (if
-	/// created).
-	fn oldest_pruned_digest_range_end(&self) -> NumberFor;
-}
-
 /// Mark for all Backend implementations, that are making use of state data, stored locally.
 pub trait LocalBackend: Backend {}
-
-/// Mark for all Backend implementations, that are fetching required state data from remote nodes.
-pub trait RemoteBackend: Backend {
-	/// Returns true if the state for given block is available locally.
-	fn is_local_state_available(&self, block: &BlockId) -> bool;
-
-	/// Returns reference to blockchain backend.
-	///
-	/// Returned backend either resolves blockchain data
-	/// locally, or prepares request to fetch that data from remote node.
-	fn remote_blockchain(&self) -> Arc>;
-}
-
-/// Return changes tries state at given block.
-pub fn changes_tries_state_at_block<'a, Block: BlockT>(
-	block: &BlockId,
-	maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>,
-) -> sp_blockchain::Result, NumberFor>>> {
-	let storage = match maybe_storage {
-		Some(storage) => storage,
-		None => return Ok(None),
-	};
-
-	let config_range = storage.configuration_at(block)?;
-	match config_range.config {
-		Some(config) =>
-			Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))),
-		None => Ok(None),
-	}
-}
-
-/// Provide CHT roots. These are stored on a light client and generated dynamically on a full
-/// client.
-pub trait ProvideChtRoots {
-	/// Get headers CHT root for given block. Returns None if the block is not a part of any CHT.
-	fn header_cht_root(
-		&self,
-		cht_size: NumberFor,
-		block: NumberFor,
-	) -> sp_blockchain::Result>;
-
-	/// Get changes trie CHT root for given block. Returns None if the block is not a part of any
-	/// CHT.
-	fn changes_trie_cht_root(
-		&self,
-		cht_size: NumberFor,
-		block: NumberFor,
-	) -> sp_blockchain::Result>;
-}
diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs
deleted file mode 100644
index ee7854b5d8297..0000000000000
--- a/client/api/src/cht.rs
+++ /dev/null
@@ -1,474 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see .
-
-//! Canonical hash trie definitions and helper functions.
-//!
-//! Each CHT is a trie mapping block numbers to canonical hash.
-//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
-//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
-//! request an inclusion proof of a specific block number against the trie with the
-//! root hash. A correct proof implies that the claimed block is identical to the one
-//! we discarded.
-
-use codec::Encode;
-use hash_db;
-use sp_trie;
-
-use sp_core::{convert_hash, H256};
-use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero};
-use sp_state_machine::{
-	prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend,
-	Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend,
-};
-
-use sp_blockchain::{Error as ClientError, Result as ClientResult};
-
-/// The size of each CHT. This value is passed to every CHT-related function from
-/// production code. Other values are passed from tests.
-const SIZE: u32 = 2048;
-
-/// Gets default CHT size.
-pub fn size>() -> N {
-	SIZE.into()
-}
-
-/// Returns Some(cht_number) if CHT is need to be built when the block with given number is
-/// canonized.
-pub fn is_build_required(cht_size: N, block_num: N) -> Option
-where
-	N: Clone + AtLeast32Bit,
-{
-	let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
-	let two = N::one() + N::one();
-	if block_cht_num < two {
-		return None
-	}
-	let cht_start = start_number(cht_size, block_cht_num.clone());
-	if cht_start != block_num {
-		return None
-	}
-
-	Some(block_cht_num - two)
-}
-
-/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number.
-pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option
-where
-	N: Clone + AtLeast32Bit,
-{
-	let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
-	let two = N::one() + N::one();
-	if max_cht_number < two {
-		return None
-	}
-	Some(max_cht_number - two)
-}
-
-/// Compute a CHT root from an iterator of block hashes. Fails if shorter than
-/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`.
-/// Discards the trie's nodes.
-pub fn compute_root(
-	cht_size: Header::Number,
-	cht_num: Header::Number,
-	hashes: I,
-) -> ClientResult
-where
-	Header: HeaderT,
-	Hasher: hash_db::Hasher,
-	Hasher::Out: Ord,
-	I: IntoIterator>>,
-{
-	use sp_trie::TrieConfiguration;
-	Ok(sp_trie::trie_types::Layout::::trie_root(build_pairs::(
-		cht_size, cht_num, hashes,
-	)?))
-}
-
-/// Build CHT-based header proof.
-pub fn build_proof(
-	cht_size: Header::Number,
-	cht_num: Header::Number,
-	blocks: BlocksI,
-	hashes: HashesI,
-) -> ClientResult
-where
-	Header: HeaderT,
-	Hasher: hash_db::Hasher,
-	Hasher::Out: Ord + codec::Codec,
-	BlocksI: IntoIterator,
-	HashesI: IntoIterator>>,
-{
-	let transaction = build_pairs::(cht_size, cht_num, hashes)?
-		.into_iter()
-		.map(|(k, v)| (k, Some(v)))
-		.collect::>();
-	let storage = InMemoryBackend::::default().update(vec![(None, transaction)]);
-	let trie_storage = storage
-		.as_trie_backend()
-		.expect("InMemoryState::as_trie_backend always returns Some; qed");
-	prove_read_on_trie_backend(
-		trie_storage,
-		blocks.into_iter().map(|number| encode_cht_key(number)),
-	)
-	.map_err(ClientError::from_state)
-}
-
-/// Check CHT-based header proof.
-pub fn check_proof(
-	local_root: Header::Hash,
-	local_number: Header::Number,
-	remote_hash: Header::Hash,
-	remote_proof: StorageProof,
-) -> ClientResult<()>
-where
-	Header: HeaderT,
-	Hasher: hash_db::Hasher,
-	Hasher::Out: Ord + codec::Codec,
-{
-	do_check_proof::(
-		local_root,
-		local_number,
-		remote_hash,
-		move |local_root, local_cht_key| {
-			read_proof_check::(
-				local_root,
-				remote_proof,
-				::std::iter::once(local_cht_key),
-			)
-			.map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed"))
-			.map_err(ClientError::from_state)
-		},
-	)
-}
-
-/// Check CHT-based header proof on pre-created proving backend.
-pub fn check_proof_on_proving_backend(
-	local_root: Header::Hash,
-	local_number: Header::Number,
-	remote_hash: Header::Hash,
-	proving_backend: &TrieBackend, Hasher>,
-) -> ClientResult<()>
-where
-	Header: HeaderT,
-	Hasher: hash_db::Hasher,
-	Hasher::Out: Ord + codec::Codec,
-{
-	do_check_proof::(
-		local_root,
-		local_number,
-		remote_hash,
-		|_, local_cht_key| {
-			read_proof_check_on_proving_backend::(proving_backend, local_cht_key)
-				.map_err(ClientError::from_state)
-		},
-	)
-}
-
-/// Check CHT-based header proof using passed checker function.
-fn do_check_proof(
-	local_root: Header::Hash,
-	local_number: Header::Number,
-	remote_hash: Header::Hash,
-	checker: F,
-) -> ClientResult<()>
-where
-	Header: HeaderT,
-	Hasher: hash_db::Hasher,
-	Hasher::Out: Ord,
-	F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>,
-{
-	let root: Hasher::Out = convert_hash(&local_root);
-	let local_cht_key = encode_cht_key(local_number);
-	let local_cht_value = checker(root, &local_cht_key)?;
-	let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
-	let local_hash =
-		decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
-	match &local_hash[..] == remote_hash.as_ref() {
-		true => Ok(()),
-		false => Err(ClientError::InvalidCHTProof.into()),
-	}
-}
-
-/// Group ordered blocks by CHT number and call functor with blocks of each group.
-pub fn for_each_cht_group(
-	cht_size: Header::Number,
-	blocks: I,
-	mut functor: F,
-	mut functor_param: P,
-) -> ClientResult<()>
-where
-	Header: HeaderT,
-	I: IntoIterator,
-	F: FnMut(P, Header::Number, Vec) -> ClientResult,
-{
-	let mut current_cht_num = None;
-	let mut current_cht_blocks = Vec::new();
-	for block in blocks {
-		let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| {
-			ClientError::Backend(format!("Cannot compute CHT root for the block #{}", block))
-		})?;
-
-		let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
-		if advance_to_next_cht {
-			let current_cht_num = current_cht_num.expect(
-				"advance_to_next_cht is true;
-				it is true only when current_cht_num is Some; qed",
-			);
-			assert!(
-				new_cht_num > current_cht_num,
-				"for_each_cht_group only supports ordered iterators"
-			);
-
-			functor_param =
-				functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
-		}
-
-		current_cht_blocks.push(block);
-		current_cht_num = Some(new_cht_num);
-	}
-
-	if let Some(current_cht_num) = current_cht_num {
-		functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?;
-	}
-
-	Ok(())
-}
-
-/// Build pairs for computing CHT.
-fn build_pairs(
-	cht_size: Header::Number,
-	cht_num: Header::Number,
-	hashes: I,
-) -> ClientResult, Vec)>>
-where
-	Header: HeaderT,
-	I: IntoIterator>>,
-{
-	let start_num = start_number(cht_size, cht_num);
-	let mut pairs = Vec::new();
-	let mut hash_index = Header::Number::zero();
-	for hash in hashes.into_iter() {
-		let hash =
-			hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?;
-		pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash)));
-		hash_index += Header::Number::one();
-		if hash_index == cht_size {
-			break
-		}
-	}
-
-	if hash_index == cht_size {
-		Ok(pairs)
-	} else {
-		Err(ClientError::MissingHashRequiredForCHT)
-	}
-}
-
-/// Get the starting block of a given CHT.
-/// CHT 0 includes block 1...SIZE,
-/// CHT 1 includes block SIZE + 1 ... 2*SIZE
-/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE).
-/// This is because the genesis hash is assumed to be known
-/// and including it would be redundant.
-pub fn start_number(cht_size: N, cht_num: N) -> N {
-	(cht_num * cht_size) + N::one()
-}
-
-/// Get the ending block of a given CHT.
-pub fn end_number(cht_size: N, cht_num: N) -> N {
-	(cht_num + N::one()) * cht_size
-}
-
-/// Convert a block number to a CHT number.
-/// Returns `None` for `block_num` == 0, `Some` otherwise.
-pub fn block_to_cht_number(cht_size: N, block_num: N) -> Option {
-	if block_num == N::zero() {
-		None
-	} else {
-		Some((block_num - N::one()) / cht_size)
-	}
-}
-
-/// Convert header number into CHT key.
-pub fn encode_cht_key(number: N) -> Vec {
-	number.encode()
-}
-
-/// Convert header hash into CHT value.
-fn encode_cht_value>(hash: Hash) -> Vec {
-	hash.as_ref().to_vec()
-}
-
-/// Convert CHT value into block header hash.
-pub fn decode_cht_value(value: &[u8]) -> Option {
-	match value.len() {
-		32 => Some(H256::from_slice(&value[0..32])),
-		_ => None,
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use sp_runtime::{generic, traits::BlakeTwo256};
-
-	type Header = generic::Header;
-
-	#[test]
-	fn is_build_required_works() {
-		assert_eq!(is_build_required(SIZE, 0u32.into()), None);
-		assert_eq!(is_build_required(SIZE, 1u32.into()), None);
-		assert_eq!(is_build_required(SIZE, SIZE), None);
-		assert_eq!(is_build_required(SIZE, SIZE + 1), None);
-		assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
-		assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
-		assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
-		assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
-		assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
-		assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
-	}
-
-	#[test]
-	fn max_cht_number_works() {
-		assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
-		assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
-		assert_eq!(max_cht_number(SIZE, SIZE), None);
-		assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
-		assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
-		assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
-		assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
-		assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
-		assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
-		assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
-	}
-
-	#[test]
-	fn start_number_works() {
-		assert_eq!(start_number(SIZE, 0u32), 1u32);
-		assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
-		assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
-	}
-
-	#[test]
-	fn end_number_works() {
-		assert_eq!(end_number(SIZE, 0u32), SIZE);
-		assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
-		assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
-	}
-
-	#[test]
-	fn build_pairs_fails_when_no_enough_blocks() {
-		assert!(build_pairs::(
-			SIZE as _,
-			0,
-			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)
-		)
-		.is_err());
-	}
-
-	#[test]
-	fn build_pairs_fails_when_missing_block() {
-		assert!(build_pairs::(
-			SIZE as _,
-			0,
-			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
-				.take(SIZE as usize / 2)
-				.chain(::std::iter::once(Ok(None)))
-				.chain(
-					::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
-						.take(SIZE as usize / 2 - 1)
-				)
-		)
-		.is_err());
-	}
-
-	#[test]
-	fn compute_root_works() {
-		assert!(compute_root::(
-			SIZE as _,
-			42,
-			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
-		)
-		.is_ok());
-	}
-
-	#[test]
-	#[should_panic]
-	fn build_proof_panics_when_querying_wrong_block() {
-		assert!(build_proof::(
-			SIZE as _,
-			0,
-			vec![(SIZE * 1000) as u64],
-			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
-		)
-		.is_err());
-	}
-
-	#[test]
-	fn build_proof_works() {
-		assert!(build_proof::(
-			SIZE as _,
-			0,
-			vec![(SIZE / 2) as u64],
-			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize)
-		)
-		.is_ok());
-	}
-
-	#[test]
-	#[should_panic]
-	fn for_each_cht_group_panics() {
-		let cht_size = SIZE as u64;
-		let _ = for_each_cht_group::(
-			cht_size,
-			vec![cht_size * 5, cht_size * 2],
-			|_, _, _| Ok(()),
-			(),
-		);
-	}
-
-	#[test]
-	fn for_each_cht_group_works() {
-		let cht_size = SIZE as u64;
-		let _ = for_each_cht_group::(
-			cht_size,
-			vec![
-				cht_size * 2 + 1,
-				cht_size * 2 + 2,
-				cht_size * 2 + 5,
-				cht_size * 4 + 1,
-				cht_size * 4 + 7,
-				cht_size * 6 + 1,
-			],
-			|_, cht_num, blocks| {
-				match cht_num {
-					2 => assert_eq!(
-						blocks,
-						vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]
-					),
-					4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
-					6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
-					_ => unreachable!(),
-				}
-
-				Ok(())
-			},
-			(),
-		);
-	}
-}
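
Since this deletion also removes the only prose description of CHT numbering, here is a standalone sketch of that arithmetic for reference. It mirrors the deleted `block_to_cht_number`, `start_number` and `end_number` with the same `SIZE = 2048` default, specialized to `u64` for brevity:

```rust
const SIZE: u64 = 2048;

/// CHT N covers blocks (1 + N*SIZE)..=((N + 1)*SIZE); block 0 (genesis)
/// is assumed known and belongs to no CHT.
fn block_to_cht_number(block: u64) -> Option<u64> {
	if block == 0 { None } else { Some((block - 1) / SIZE) }
}

fn start_number(cht_num: u64) -> u64 { cht_num * SIZE + 1 }
fn end_number(cht_num: u64) -> u64 { (cht_num + 1) * SIZE }

fn main() {
	assert_eq!(block_to_cht_number(1), Some(0));
	assert_eq!(block_to_cht_number(SIZE), Some(0));
	assert_eq!(block_to_cht_number(SIZE + 1), Some(1));
	assert_eq!(start_number(1), SIZE + 1);
	assert_eq!(end_number(1), 2 * SIZE);
}
```
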
diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs
index 2f4327dfc4e4a..39fe9e063d20b 100644
--- a/client/api/src/in_mem.rs
+++ b/client/api/src/in_mem.rs
@@ -29,8 +29,8 @@ use sp_runtime::{
 	Justification, Justifications, Storage,
 };
 use sp_state_machine::{
-	Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend,
-	IndexOperation, StorageCollection,
+	Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation,
+	StorageCollection,
 };
 use std::{
 	collections::{HashMap, HashSet},
@@ -39,10 +39,10 @@ use std::{
 };
 
 use crate::{
-	backend::{self, NewBlockState, ProvideChtRoots},
+	backend::{self, NewBlockState},
 	blockchain::{self, well_known_cache_keys::Id as CacheKeyId, BlockStatus, HeaderBackend},
 	leaves::LeafSet,
-	light, UsageInfo,
+	UsageInfo,
 };
 
 struct PendingBlock {
@@ -109,7 +106,6 @@ struct BlockchainStorage {
 	finalized_number: NumberFor,
 	genesis_hash: Block::Hash,
 	header_cht_roots: HashMap, Block::Hash>,
-	changes_trie_cht_roots: HashMap, Block::Hash>,
 	leaves: LeafSet>,
 	aux: HashMap, Vec>,
 }
@@ -152,7 +151,6 @@ impl Blockchain {
 			finalized_number: Zero::zero(),
 			genesis_hash: Default::default(),
 			header_cht_roots: HashMap::new(),
-			changes_trie_cht_roots: HashMap::new(),
 			leaves: LeafSet::new(),
 			aux: HashMap::new(),
 		}));
@@ -442,10 +440,6 @@ impl blockchain::Backend for Blockchain {
 		Ok(self.storage.read().finalized_hash.clone())
 	}
 
-	fn cache(&self) -> Option>> {
-		None
-	}
-
 	fn leaves(&self) -> sp_blockchain::Result> {
 		Ok(self.storage.read().leaves.hashes())
 	}
@@ -466,12 +460,6 @@ impl blockchain::Backend for Blockchain {
 	}
 }
 
-impl blockchain::ProvideCache for Blockchain {
-	fn cache(&self) -> Option>> {
-		None
-	}
-}
-
 impl backend::AuxStore for Blockchain {
 	fn insert_aux<
 		'a,
@@ -499,82 +487,6 @@ impl backend::AuxStore for Blockchain {
 	}
 }
 
-impl light::Storage for Blockchain
-where
-	Block::Hash: From<[u8; 32]>,
-{
-	fn import_header(
-		&self,
-		header: Block::Header,
-		_cache: HashMap>,
-		state: NewBlockState,
-		aux_ops: Vec<(Vec, Option>)>,
-	) -> sp_blockchain::Result<()> {
-		let hash = header.hash();
-		self.insert(hash, header, None, None, state)?;
-
-		self.write_aux(aux_ops);
-		Ok(())
-	}
-
-	fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> {
-		Blockchain::set_head(self, id)
-	}
-
-	fn last_finalized(&self) -> sp_blockchain::Result {
-		Ok(self.storage.read().finalized_hash.clone())
-	}
-
-	fn finalize_header(&self, id: BlockId) -> sp_blockchain::Result<()> {
-		Blockchain::finalize_header(self, id, None)
-	}
-
-	fn cache(&self) -> Option>> {
-		None
-	}
-
-	fn usage_info(&self) -> Option {
-		None
-	}
-}
-
-impl ProvideChtRoots for Blockchain {
-	fn header_cht_root(
-		&self,
-		_cht_size: NumberFor,
-		block: NumberFor,
-	) -> sp_blockchain::Result> {
-		self.storage
-			.read()
-			.header_cht_roots
-			.get(&block)
-			.cloned()
-			.ok_or_else(|| {
-				sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block))
-			})
-			.map(Some)
-	}
-
-	fn changes_trie_cht_root(
-		&self,
-		_cht_size: NumberFor,
-		block: NumberFor,
-	) -> sp_blockchain::Result> {
-		self.storage
-			.read()
-			.changes_trie_cht_roots
-			.get(&block)
-			.cloned()
-			.ok_or_else(|| {
-				sp_blockchain::Error::Backend(format!(
-					"Changes trie CHT for block {} not exists",
-					block
-				))
-			})
-			.map(Some)
-	}
-}
-
 /// In-memory operation.
 pub struct BlockImportOperation {
 	pending_block: Option>,
@@ -650,13 +562,6 @@ where
 		Ok(())
 	}
 
-	fn update_changes_trie(
-		&mut self,
-		_update: ChangesTrieTransaction, NumberFor>,
-	) -> sp_blockchain::Result<()> {
-		Ok(())
-	}
-
 	fn set_genesis_state(
 		&mut self,
 		storage: Storage,
@@ -846,10 +751,6 @@ where
 		None
 	}
 
-	fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> {
-		None
-	}
-
 	fn offchain_storage(&self) -> Option {
 		None
 	}
@@ -885,22 +786,6 @@ where
 
 impl backend::LocalBackend for Backend where Block::Hash: Ord {}
 
-impl backend::RemoteBackend for Backend
-where
-	Block::Hash: Ord,
-{
-	fn is_local_state_available(&self, block: &BlockId) -> bool {
-		self.blockchain
-			.expect_block_number_from_id(block)
-			.map(|num| num.is_zero())
-			.unwrap_or(false)
-	}
-
-	fn remote_blockchain(&self) -> Arc> {
-		unimplemented!()
-	}
-}
-
 /// Check that genesis storage is valid.
 pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
 	if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs
index f1c78f6603eb8..a7029d02cbd45 100644
--- a/client/api/src/lib.rs
+++ b/client/api/src/lib.rs
@@ -21,19 +21,16 @@
 
 pub mod backend;
 pub mod call_executor;
-pub mod cht;
 pub mod client;
 pub mod execution_extensions;
 pub mod in_mem;
 pub mod leaves;
-pub mod light;
 pub mod notifications;
 pub mod proof_provider;
 
 pub use backend::*;
 pub use call_executor::*;
 pub use client::*;
-pub use light::*;
 pub use notifications::*;
 pub use proof_provider::*;
 pub use sp_blockchain as blockchain;
diff --git a/client/api/src/light.rs b/client/api/src/light.rs
deleted file mode 100644
index 8638ddf741f30..0000000000000
--- a/client/api/src/light.rs
+++ /dev/null
@@ -1,372 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see .
-
-//! Substrate light client interfaces
-
-use std::{
-	collections::{BTreeMap, HashMap},
-	future::Future,
-	sync::Arc,
-};
-
-use crate::{
-	backend::{AuxStore, NewBlockState},
-	ProvideChtRoots, UsageInfo,
-};
-use sp_blockchain::{
-	well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderBackend,
-	HeaderMetadata, Result as ClientResult,
-};
-use sp_core::{storage::PrefixedStorageKey, ChangesTrieConfigurationRange};
-use sp_runtime::{
-	generic::BlockId,
-	traits::{Block as BlockT, Header as HeaderT, NumberFor},
-};
-use sp_state_machine::StorageProof;
-
-/// Remote call request.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub struct RemoteCallRequest {
-	/// Call at state of given block.
-	pub block: Header::Hash,
-	/// Header of block at which call is performed.
-	pub header: Header,
-	/// Method to call.
-	pub method: String,
-	/// Call data.
-	pub call_data: Vec,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Remote canonical header request.
-#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
-pub struct RemoteHeaderRequest {
-	/// The root of CHT this block is included in.
-	pub cht_root: Header::Hash,
-	/// Number of the header to query.
-	pub block: Header::Number,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Remote storage read request.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub struct RemoteReadRequest {
-	/// Read at state of given block.
-	pub block: Header::Hash,
-	/// Header of block at which read is performed.
-	pub header: Header,
-	/// Storage key to read.
-	pub keys: Vec>,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Remote storage read child request.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
-pub struct RemoteReadChildRequest {
-	/// Read at state of given block.
-	pub block: Header::Hash,
-	/// Header of block at which read is performed.
-	pub header: Header,
-	/// Storage key for child.
-	pub storage_key: PrefixedStorageKey,
-	/// Child storage key to read.
-	pub keys: Vec>,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Remote key changes read request.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct RemoteChangesRequest {
-	/// All changes trie configurations that are valid within [first_block; last_block].
-	pub changes_trie_configs: Vec>,
-	/// Query changes from range of blocks, starting (and including) with this hash...
-	pub first_block: (Header::Number, Header::Hash),
-	/// ...ending (and including) with this hash. Should come after first_block and
-	/// be the part of the same fork.
-	pub last_block: (Header::Number, Header::Hash),
-	/// Only use digests from blocks up to this hash. Should be last_block OR come
-	/// after this block and be the part of the same fork.
-	pub max_block: (Header::Number, Header::Hash),
-	/// Known changes trie roots for the range of blocks [tries_roots.0..max_block].
-	/// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node.
-	pub tries_roots: (Header::Number, Header::Hash, Vec),
-	/// Optional Child Storage key to read.
-	pub storage_key: Option,
-	/// Storage key to read.
-	pub key: Vec,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Key changes read proof.
-#[derive(Debug, PartialEq, Eq)]
-pub struct ChangesProof {
-	/// Max block that has been used in changes query.
-	pub max_block: Header::Number,
-	/// All touched nodes of all changes tries.
-	pub proof: Vec>,
-	/// All changes tries roots that have been touched AND are missing from
-	/// the requester' node. It is a map of block number => changes trie root.
-	pub roots: BTreeMap,
-	/// The proofs for all changes tries roots that have been touched AND are
-	/// missing from the requester' node. It is a map of CHT number => proof.
-	pub roots_proof: StorageProof,
-}
-
-/// Remote block body request
-#[derive(Clone, Default, Debug, PartialEq, Eq, Hash)]
-pub struct RemoteBodyRequest {
-	/// Header of the requested block body
-	pub header: Header,
-	/// Number of times to retry request. None means that default RETRY_COUNT is used.
-	pub retry_count: Option,
-}
-
-/// Light client data fetcher. Implementations of this trait must check if remote data
-/// is correct (see FetchedDataChecker) and return already checked data.
-pub trait Fetcher: Send + Sync {
-	/// Remote header future.
-	type RemoteHeaderResult: Future>
-		+ Unpin
-		+ Send
-		+ 'static;
-	/// Remote storage read future.
-	type RemoteReadResult: Future, Option>>, ClientError>>
-		+ Unpin
-		+ Send
-		+ 'static;
-	/// Remote call result future.
-	type RemoteCallResult: Future, ClientError>> + Unpin + Send + 'static;
-	/// Remote changes result future.
-	type RemoteChangesResult: Future, u32)>, ClientError>>
-		+ Unpin
-		+ Send
-		+ 'static;
-	/// Remote block body result future.
-	type RemoteBodyResult: Future, ClientError>>
-		+ Unpin
-		+ Send
-		+ 'static;
-
-	/// Fetch remote header.
-	fn remote_header(
-		&self,
-		request: RemoteHeaderRequest,
-	) -> Self::RemoteHeaderResult;
-	/// Fetch remote storage value.
-	fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult;
-	/// Fetch remote storage child value.
-	fn remote_read_child(
-		&self,
-		request: RemoteReadChildRequest,
-	) -> Self::RemoteReadResult;
-	/// Fetch remote call result.
-	fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult;
-	/// Fetch remote changes ((block number, extrinsic index)) where given key has been changed
-	/// at a given blocks range.
-	fn remote_changes(
-		&self,
-		request: RemoteChangesRequest,
-	) -> Self::RemoteChangesResult;
-	/// Fetch remote block body
-	fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult;
-}
-
-/// Light client remote data checker.
-///
-/// Implementations of this trait should not use any prunable blockchain data
-/// except that is passed to its methods.
-pub trait FetchChecker: Send + Sync {
-	/// Check remote header proof.
-	fn check_header_proof(
-		&self,
-		request: &RemoteHeaderRequest,
-		header: Option,
-		remote_proof: StorageProof,
-	) -> ClientResult;
-	/// Check remote storage read proof.
-	fn check_read_proof(
-		&self,
-		request: &RemoteReadRequest,
-		remote_proof: StorageProof,
-	) -> ClientResult, Option>>>;
-	/// Check remote storage read proof.
-	fn check_read_child_proof(
-		&self,
-		request: &RemoteReadChildRequest,
-		remote_proof: StorageProof,
-	) -> ClientResult, Option>>>;
-	/// Check remote method execution proof.
-	fn check_execution_proof(
-		&self,
-		request: &RemoteCallRequest,
-		remote_proof: StorageProof,
-	) -> ClientResult>;
-	/// Check remote changes query proof.
-	fn check_changes_proof(
-		&self,
-		request: &RemoteChangesRequest,
-		proof: ChangesProof,
-	) -> ClientResult, u32)>>;
-	/// Check remote body proof.
-	fn check_body_proof(
-		&self,
-		request: &RemoteBodyRequest,
-		body: Vec,
-	) -> ClientResult>;
-}
-
-/// Light client blockchain storage.
-pub trait Storage:
-	AuxStore
-	+ HeaderBackend
-	+ HeaderMetadata
-	+ ProvideChtRoots
-{
-	/// Store new header. Should refuse to revert any finalized blocks.
-	///
-	/// Takes new authorities, the leaf state of the new block, and
-	/// any auxiliary storage updates to place in the same operation.
-	fn import_header(
-		&self,
-		header: Block::Header,
-		cache: HashMap>,
-		state: NewBlockState,
-		aux_ops: Vec<(Vec, Option>)>,
-	) -> ClientResult<()>;
-
-	/// Set an existing block as new best block.
-	fn set_head(&self, block: BlockId) -> ClientResult<()>;
-
-	/// Mark historic header as finalized.
-	fn finalize_header(&self, block: BlockId) -> ClientResult<()>;
-
-	/// Get last finalized header.
-	fn last_finalized(&self) -> ClientResult;
-
-	/// Get storage cache.
-	fn cache(&self) -> Option>>;
-
-	/// Get storage usage statistics.
-	fn usage_info(&self) -> Option;
-}
-
-/// Remote header.
-#[derive(Debug)]
-pub enum LocalOrRemote {
-	/// When data is available locally, it is returned.
-	Local(Data),
-	/// When data is unavailable locally, the request to fetch it from remote node is returned.
-	Remote(Request),
-	/// When data is unknown.
-	Unknown,
-}
-
-/// Futures-based blockchain backend that either resolves blockchain data
-/// locally, or fetches required data from remote node.
-pub trait RemoteBlockchain: Send + Sync {
-	/// Get block header.
-	fn header(
-		&self,
-		id: BlockId,
-	) -> ClientResult>>;
-}
-
-/// Returns future that resolves header either locally, or remotely.
-pub fn future_header>(
-	blockchain: &dyn RemoteBlockchain,
-	fetcher: &F,
-	id: BlockId,
-) -> impl Future, ClientError>> {
-	use futures::future::{ready, Either, FutureExt};
-
-	match blockchain.header(id) {
-		Ok(LocalOrRemote::Remote(request)) =>
-			Either::Left(fetcher.remote_header(request).then(|header| ready(header.map(Some)))),
-		Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))),
-		Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))),
-		Err(err) => Either::Right(ready(Err(err))),
-	}
-}
-
-#[cfg(test)]
-pub mod tests {
-	use super::*;
-	use futures::future::Ready;
-	use parking_lot::Mutex;
-	use sp_blockchain::Error as ClientError;
-	use sp_test_primitives::{Block, Extrinsic, Header};
-
-	#[derive(Debug, thiserror::Error)]
-	#[error("Not implemented on test node")]
-	struct MockError;
-
-	impl Into for MockError {
-		fn into(self) -> ClientError {
-			ClientError::Application(Box::new(self))
-		}
-	}
-
-	pub type OkCallFetcher = Mutex>;
-
-	fn not_implemented_in_tests() -> Ready> {
-		futures::future::ready(Err(MockError.into()))
-	}
-
-	impl Fetcher for OkCallFetcher {
-		type RemoteHeaderResult = Ready>;
-		type RemoteReadResult = Ready, Option>>, ClientError>>;
-		type RemoteCallResult = Ready, ClientError>>;
-		type RemoteChangesResult = Ready, u32)>, ClientError>>;
-		type RemoteBodyResult = Ready, ClientError>>;
-
-		fn remote_header(&self, _request: RemoteHeaderRequest) -> Self::RemoteHeaderResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_read(&self, _request: RemoteReadRequest) -> Self::RemoteReadResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_read_child(
-			&self,
-			_request: RemoteReadChildRequest,
-		) -> Self::RemoteReadResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_call(&self, _request: RemoteCallRequest) -> Self::RemoteCallResult {
-			futures::future::ready(Ok((*self.lock()).clone()))
-		}
-
-		fn remote_changes(
-			&self,
-			_request: RemoteChangesRequest,
-		) -> Self::RemoteChangesResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_body(&self, _request: RemoteBodyRequest) -> Self::RemoteBodyResult {
-			not_implemented_in_tests()
-		}
-	}
-}
diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs
index 75f9c55e134d2..6207139f9758b 100644
--- a/client/api/src/proof_provider.rs
+++ b/client/api/src/proof_provider.rs
@@ -17,10 +17,10 @@
 // along with this program. If not, see .
 
 //! Proof utilities
-use crate::{ChangesProof, CompactProof, StorageProof};
+use crate::{CompactProof, StorageProof};
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 use sp_state_machine::{KeyValueStates, KeyValueStorageLevel};
-use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey};
+use sp_storage::ChildInfo;
 
 /// Interface for providing block proving utilities.
 pub trait ProofProvider {
@@ -50,27 +50,6 @@ pub trait ProofProvider {
 		method: &str,
 		call_data: &[u8],
 	) -> sp_blockchain::Result<(Vec, StorageProof)>;
-	/// Reads given header and generates CHT-based header proof.
-	fn header_proof(
-		&self,
-		id: &BlockId,
-	) -> sp_blockchain::Result<(Block::Header, StorageProof)>;
-
-	/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given
-	/// blocks range. `min` is the hash of the first block, which changes trie root is known to the
-	/// requester - when we're using changes tries from ascendants of this block, we should provide
-	/// proofs for changes tries roots `max` is the hash of the last block known to the requester -
-	/// we can't use changes tries from descendants of this block.
-	/// Works only for runtimes that are supporting changes tries.
-	fn key_changes_proof(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-	) -> sp_blockchain::Result>;
 	/// Given a `BlockId` iterate over all storage values starting at `start_keys`.
 	/// Last `start_keys` element contains last accessed key value.
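
The deleted `future_header` above captures the dispatch pattern the light client used throughout: serve from local data when possible, otherwise turn the query into a fetcher request. A minimal generic sketch of that shape; the types are simplified stand-ins, not the deleted API:

```rust
/// Simplified stand-in for the deleted `LocalOrRemote` enum.
enum LocalOrRemote<Data, Request> {
	Local(Data),
	Remote(Request),
	Unknown,
}

/// Resolve locally when possible, otherwise hand the request to a
/// fetcher closure; `None` models the `Unknown` arm.
fn resolve<D, R>(source: LocalOrRemote<D, R>, fetch: impl FnOnce(R) -> D) -> Option<D> {
	match source {
		LocalOrRemote::Local(data) => Some(data),
		LocalOrRemote::Remote(req) => Some(fetch(req)),
		LocalOrRemote::Unknown => None,
	}
}
```
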
diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs
index 305c4d753c1ea..4c7f6c856ec86 100644
--- a/client/basic-authorship/src/basic_authorship.rs
+++ b/client/basic-authorship/src/basic_authorship.rs
@@ -41,7 +41,8 @@ use sp_core::traits::SpawnNamed;
 use sp_inherents::InherentData;
 use sp_runtime::{
 	generic::BlockId,
-	traits::{BlakeTwo256, Block as BlockT, DigestFor, Hash as HashT, Header as HeaderT},
+	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
+	Digest,
 };
 use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
 
@@ -261,7 +262,7 @@ where
 	fn propose(
 		self,
 		inherent_data: InherentData,
-		inherent_digests: DigestFor,
+		inherent_digests: Digest,
 		max_duration: time::Duration,
 		block_size_limit: Option,
 	) -> Self::Proposal {
@@ -310,7 +311,7 @@ where
 	async fn propose_with(
 		self,
 		inherent_data: InherentData,
-		inherent_digests: DigestFor,
+		inherent_digests: Digest,
 		deadline: time::Instant,
 		block_size_limit: Option,
 	) -> Result, PR::Proof>, sp_blockchain::Error>
@@ -690,13 +691,8 @@ mod tests {
 		api.execute_block(&block_id, proposal.block).unwrap();
 
 		let state = backend.state_at(block_id).unwrap();
-		let changes_trie_state =
-			backend::changes_tries_state_at_block(&block_id, backend.changes_trie_storage())
-				.unwrap();
 
-		let storage_changes = api
-			.into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash)
-			.unwrap();
+		let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap();
 
 		assert_eq!(
 			proposal.storage_changes.transaction_storage_root,
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs
index e89421edfb168..01afaca0cacf4 100644
--- a/client/block-builder/src/lib.rs
+++ b/client/block-builder/src/lib.rs
@@ -35,7 +35,8 @@ use sp_blockchain::{ApplyExtrinsicFailed, Error};
 use sp_core::ExecutionContext;
 use sp_runtime::{
 	generic::BlockId,
-	traits::{Block as BlockT, DigestFor, Hash, HashFor, Header as HeaderT, NumberFor, One},
+	traits::{Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One},
+	Digest,
 };
 
 pub use sp_block_builder::BlockBuilder as BlockBuilderApi;
@@ -119,14 +120,14 @@ where
 	fn new_block_at>(
 		&self,
 		parent: &BlockId,
-		inherent_digests: DigestFor,
+		inherent_digests: Digest,
 		record_proof: R,
 	) -> sp_blockchain::Result>;
 
 	/// Create a new block, built on the head of the chain.
 	fn new_block(
 		&self,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 	) -> sp_blockchain::Result<BlockBuilder<Block, RA, B>>;
 }
 
@@ -159,7 +160,7 @@ where
 		parent_hash: Block::Hash,
 		parent_number: NumberFor<Block>,
 		record_proof: RecordProof,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 		backend: &'a B,
 	) -> Result<Self, Error> {
 		let header = <<Block as BlockT>::Header as HeaderT>::new(
@@ -237,15 +238,11 @@ where
 		let proof = self.api.extract_proof();
 
 		let state = self.backend.state_at(self.block_id)?;
-		let changes_trie_state = backend::changes_tries_state_at_block(
-			&self.block_id,
-			self.backend.changes_trie_storage(),
-		)?;
 
 		let parent_hash = self.parent_hash;
 		let storage_changes = self
 			.api
-			.into_storage_changes(&state, changes_trie_state.as_ref(), parent_hash)
+			.into_storage_changes(&state, parent_hash)
 			.map_err(|e| sp_blockchain::Error::StorageChanges(e))?;
 
 		Ok(BuiltBlock {
diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs
index a4dbe5012ea19..d2c4ad4498d5e 100644
--- a/client/consensus/aura/src/import_queue.rs
+++ b/client/consensus/aura/src/import_queue.rs
@@ -33,7 +33,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
 use sp_blockchain::{
 	well_known_cache_keys::{self, Id as CacheKeyId},
-	HeaderBackend, ProvideCache,
+	HeaderBackend,
 };
 use sp_consensus::{CanAuthorWith, Error as ConsensusError};
 use sp_consensus_aura::{
@@ -45,7 +45,8 @@ use sp_core::{crypto::Pair, ExecutionContext};
 use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _};
 use sp_runtime::{
 	generic::{BlockId, OpaqueDigestItemId},
-	traits::{Block as BlockT, DigestItemFor, Header},
+	traits::{Block as BlockT, Header},
+	DigestItem,
 };
 use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc};
 
@@ -61,9 +62,8 @@ fn check_header<C, B: BlockT, P: Pair>(
 	hash: B::Hash,
 	authorities: &[AuthorityId<P>
], check_for_equivocation: CheckForEquivocation, -) -> Result)>, Error> +) -> Result, Error> where - DigestItemFor: CompatibleDigestItem, P::Signature: Codec, C: sc_client_api::backend::AuxStore, P::Public: Encode + Decode + PartialEq + Clone, @@ -189,14 +189,8 @@ where #[async_trait::async_trait] impl Verifier for AuraVerifier where - C: ProvideRuntimeApi - + Send - + Sync - + sc_client_api::backend::AuxStore - + ProvideCache - + BlockOf, + C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, C::Api: BlockBuilderApi + AuraApi> + ApiExt, - DigestItemFor: CompatibleDigestItem, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, @@ -385,7 +379,6 @@ where C: 'static + ProvideRuntimeApi + BlockOf - + ProvideCache + Send + Sync + AuxStore @@ -395,7 +388,6 @@ where + Send + Sync + 'static, - DigestItemFor: CompatibleDigestItem, P: Pair + Send + Sync + 'static, P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 946e0b90c4dd4..16880ae188ad6 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -52,7 +52,7 @@ use sc_consensus_slots::{ use sc_telemetry::TelemetryHandle; use sp_api::ProvideRuntimeApi; use sp_application_crypto::{AppKey, AppPublic}; -use sp_blockchain::{HeaderBackend, ProvideCache, Result as CResult}; +use sp_blockchain::{HeaderBackend, Result as CResult}; use sp_consensus::{ BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, }; @@ -62,7 +62,8 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, DigestItemFor, Header, Member, NumberFor, Zero}, + traits::{Block as BlockT, Header, Member, NumberFor, Zero}, + DigestItem, }; mod import_queue; @@ -178,7 +179,7 @@ where P::Public: AppPublic + Hash + Member + Encode + Decode, P::Signature: TryFrom> + Hash + Member + Encode + Decode, B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, + C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, SC: SelectChain, I: BlockImport> + Send + Sync + 'static, @@ -267,7 +268,7 @@ pub fn build_aura_worker( ) -> impl sc_consensus_slots::SlotWorker>::Proof> where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + HeaderBackend + Send + Sync, + C: ProvideRuntimeApi + BlockOf + AuxStore + HeaderBackend + Send + Sync, C::Api: AuraApi>, PF: Environment + Send + Sync + 'static, PF::Proposer: Proposer>, @@ -316,7 +317,7 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraWorker where B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + HeaderBackend + Sync, + C: ProvideRuntimeApi + BlockOf + HeaderBackend + Sync, C::Api: AuraApi>, E: Environment + Send + Sync, E::Proposer: Proposer>, @@ -377,12 +378,8 @@ where }) } - fn pre_digest_data( - &self, - slot: Slot, - _claim: &Self::Claim, - ) -> Vec> { - vec![ as CompatibleDigestItem>::aura_pre_digest(slot)] + fn pre_digest_data(&self, slot: Slot, _claim: &Self::Claim) -> Vec { + vec![>::aura_pre_digest(slot)] } fn block_import_params( @@ -426,7 +423,7 @@ where .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; let signature_digest_item = - as CompatibleDigestItem>::aura_seal(signature); + 
>::aura_seal(signature); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(signature_digest_item); @@ -545,7 +542,7 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus where A: Codec + Debug, B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache, + C: ProvideRuntimeApi + BlockOf, C::Api: AuraApi, { client @@ -574,7 +571,10 @@ mod tests { use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; - use sp_runtime::traits::{Block as BlockT, DigestFor, Header as _}; + use sp_runtime::{ + traits::{Block as BlockT, Header as _}, + Digest, + }; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{ task::Poll, @@ -611,7 +611,7 @@ mod tests { fn propose( self, _: InherentData, - digests: DigestFor, + digests: Digest, _: Duration, _: Option, ) -> Self::Proposal { @@ -661,29 +661,25 @@ mod tests { _cfg: &ProtocolConfig, _peer_data: &(), ) -> Self::Verifier { - match client { - PeersClient::Full(client, _) => { - let slot_duration = slot_duration(&*client).expect("slot duration available"); - - assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION); - import_queue::AuraVerifier::new( - client, - Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); - let slot = InherentDataProvider::from_timestamp_and_duration( - *timestamp, - Duration::from_secs(6), - ); - - Ok((timestamp, slot)) - }), - AlwaysCanAuthor, - CheckForEquivocation::Yes, - None, - ) - }, - PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), - } + let client = client.as_client(); + let slot_duration = slot_duration(&*client).expect("slot duration available"); + + assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION); + import_queue::AuraVerifier::new( + client, + Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }), + AlwaysCanAuthor, + CheckForEquivocation::Yes, + None, + ) } fn make_block_import( @@ -724,7 +720,7 @@ mod tests { for (peer_id, key) in peers { let mut net = net.lock(); let peer = net.peer(*peer_id); - let client = peer.client().as_full().expect("full clients are created").clone(); + let client = peer.client().as_client(); let select_chain = peer.select_chain().expect("full client has a select chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); let keystore = Arc::new( @@ -823,7 +819,7 @@ mod tests { let mut net = net.lock(); let peer = net.peer(3); - let client = peer.client().as_full().expect("full clients are created").clone(); + let client = peer.client().as_client(); let environ = DummyFactory(client.clone()); let worker = AuraWorker { @@ -875,7 +871,7 @@ mod tests { let mut net = net.lock(); let peer = net.peer(3); - let client = peer.client().as_full().expect("full clients are created").clone(); + let client = peer.client().as_client(); let environ = DummyFactory(client.clone()); let mut worker = AuraWorker { diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 1fde788041155..4fb9f750004c5 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -103,9 +103,7 @@ use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE} use sp_api::{ApiExt, NumberFor, 
ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::{ - Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, -}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus::{ BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SlotData, @@ -117,7 +115,8 @@ use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvid use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, DigestItemFor, Header, Zero}, + traits::{Block as BlockT, Header, Zero}, + DigestItem, }; pub use sc_consensus_slots::SlotProportion; @@ -465,7 +464,6 @@ pub fn start_babe( where B: BlockT, C: ProvideRuntimeApi - + ProvideCache + ProvideUncles + BlockchainEvents + HeaderBackend @@ -539,7 +537,6 @@ async fn answer_requests( epoch_changes: SharedEpochChanges, ) where C: ProvideRuntimeApi - + ProvideCache + ProvideUncles + BlockchainEvents + HeaderBackend @@ -677,10 +674,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where B: BlockT, - C: ProvideRuntimeApi - + ProvideCache - + HeaderBackend - + HeaderMetadata, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, C::Api: BabeApi, E: Environment + Sync, E::Proposer: Proposer>, @@ -774,12 +768,8 @@ where }); } - fn pre_digest_data( - &self, - _slot: Slot, - claim: &Self::Claim, - ) -> Vec> { - vec![ as CompatibleDigestItem>::babe_pre_digest(claim.0.clone())] + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { + vec![::babe_pre_digest(claim.0.clone())] } fn block_import_params( @@ -820,8 +810,7 @@ where .clone() .try_into() .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = - as CompatibleDigestItem>::babe_seal(signature.into()); + let digest_item = ::babe_seal(signature.into()); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(digest_item); @@ -921,10 +910,7 @@ pub fn find_pre_digest(header: &B::Header) -> Result( header: &B::Header, -) -> Result, Error> -where - DigestItemFor: CompatibleDigestItem, -{ +) -> Result, Error> { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); @@ -943,10 +929,7 @@ where /// Extract the BABE config change digest from the given header, if it exists. 
fn find_next_config_digest( header: &B::Header, -) -> Result, Error> -where - DigestItemFor: CompatibleDigestItem, -{ +) -> Result, Error> { let mut config_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); @@ -1132,8 +1115,7 @@ where + ProvideRuntimeApi + Send + Sync - + AuxStore - + ProvideCache, + + AuxStore, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, @@ -1332,7 +1314,6 @@ where + HeaderMetadata + AuxStore + ProvideRuntimeApi - + ProvideCache + Send + Sync, Client::Api: BabeApi + ApiExt, @@ -1399,7 +1380,6 @@ where + HeaderMetadata + AuxStore + ProvideRuntimeApi - + ProvideCache + Send + Sync, Client::Api: BabeApi + ApiExt, @@ -1756,7 +1736,6 @@ where + Sync + 'static, Client: ProvideRuntimeApi - + ProvideCache + HeaderBackend + HeaderMetadata + AuxStore diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index c033f4535be0b..73cc453812eae 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -43,13 +43,13 @@ use sp_consensus_babe::{ use sp_core::crypto::Pair; use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; use sp_runtime::{ - generic::DigestItem, - traits::{Block as BlockT, DigestFor}, + generic::{Digest, DigestItem}, + traits::Block as BlockT, }; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{cell::RefCell, task::Poll, time::Duration}; -type Item = DigestItem; +type Item = DigestItem; type Error = sp_blockchain::Error; @@ -108,7 +108,7 @@ impl Environment for DummyFactory { impl DummyProposer { fn propose_with( &mut self, - pre_digests: DigestFor, + pre_digests: Digest, ) -> future::Ready< Result< Proposal< @@ -181,7 +181,7 @@ impl Proposer for DummyProposer { fn propose( mut self, _: InherentData, - pre_digests: DigestFor, + pre_digests: Digest, _: Duration, _: Option, ) -> Self::Proposal { @@ -295,7 +295,7 @@ impl TestNetFactory for BabeTestNet { Option>, Option, ) { - let client = client.as_full().expect("only full clients are tested"); + let client = client.as_client(); let config = Config::get_or_compute(&*client).expect("config available"); let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) @@ -320,7 +320,7 @@ impl TestNetFactory for BabeTestNet { ) -> Self::Verifier { use substrate_test_runtime_client::DefaultTestClientBuilderExt; - let client = client.as_full().expect("only full clients are used in test"); + let client = client.as_client(); trace!(target: "babe", "Creating a verifier"); // ensure block import and verifier are linked correctly. 
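The recurring change across these consensus crates is mechanical: the `DigestFor<B>` and `DigestItemFor<B>` aliases, which carried the block's hash type, give way to the now non-generic `sp_runtime::{Digest, DigestItem}`. A minimal sketch of a post-patch call site, mirroring the `Digest::default()` / `push` pattern used elsewhere in this series (the `encoded_pre_digest` bytes are illustrative only):

use sp_consensus_babe::BABE_ENGINE_ID;
use sp_runtime::{Digest, DigestItem};

// Build an inherent digest without naming any block or hash type.
fn pre_runtime_digest(encoded_pre_digest: Vec<u8>) -> Digest {
	let mut digest = Digest::default();
	digest.push(DigestItem::PreRuntime(BABE_ENGINE_ID, encoded_pre_digest));
	digest
}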
@@ -395,7 +395,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static for (peer_id, seed) in peers { let mut net = net.lock(); let peer = net.peer(*peer_id); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let client = peer.client().as_client(); let select_chain = peer.select_chain().expect("Full client has select_chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); @@ -679,7 +679,7 @@ fn importing_block_one_sets_genesis_epoch() { let peer = net.peer(0); let data = peer.data.as_ref().expect("babe link set up during initialization"); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let client = peer.client().as_client(); let mut proposer_factory = DummyFactory { client: client.clone(), @@ -721,7 +721,7 @@ fn importing_epoch_change_block_prunes_tree() { let peer = net.peer(0); let data = peer.data.as_ref().expect("babe link set up during initialization"); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let client = peer.client().as_client(); let mut block_import = data.block_import.lock().take().expect("import set up during init"); let epoch_changes = data.link.epoch_changes.clone(); @@ -836,7 +836,7 @@ fn verify_slots_are_strictly_increasing() { let peer = net.peer(0); let data = peer.data.as_ref().expect("babe link set up during initialization"); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let client = peer.client().as_client(); let mut block_import = data.block_import.lock().take().expect("import set up during init"); let mut proposer_factory = DummyFactory { diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index af118312dd07c..174b2d03c6ef0 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -32,7 +32,7 @@ use sp_consensus_babe::{ }; use sp_consensus_slots::Slot; use sp_core::{Pair, Public}; -use sp_runtime::traits::{DigestItemFor, Header}; +use sp_runtime::{traits::Header, DigestItem}; /// BABE verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { @@ -61,10 +61,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// with each having different validation logic. 
 pub(super) fn check_header<B: BlockT + Sized>(
 	params: VerificationParams<B>,
-) -> Result<CheckedHeader<B::Header, VerifiedHeaderInfo<B>>, Error<B>>
-where
-	DigestItemFor<B>: CompatibleDigestItem,
-{
+) -> Result<CheckedHeader<B::Header, VerifiedHeaderInfo>, Error<B>> {
 	let VerificationParams { mut header, pre_digest, slot_now, epoch } = params;
 
 	let authorities = &epoch.authorities;
@@ -137,9 +134,9 @@ where
 	Ok(CheckedHeader::Checked(header, info))
 }
 
-pub(super) struct VerifiedHeaderInfo<B: BlockT> {
-	pub(super) pre_digest: DigestItemFor<B>,
-	pub(super) seal: DigestItemFor<B>,
+pub(super) struct VerifiedHeaderInfo {
+	pub(super) pre_digest: DigestItem,
+	pub(super) seal: DigestItem,
 	pub(super) author: AuthorityId,
 }
 
diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs
index 5294db2396042..03fde159736ca 100644
--- a/client/consensus/common/src/block_import.rs
+++ b/client/consensus/common/src/block_import.rs
@@ -20,8 +20,8 @@
 use serde::{Deserialize, Serialize};
 use sp_runtime::{
-	traits::{Block as BlockT, DigestItemFor, HashFor, Header as HeaderT, NumberFor},
-	Justification, Justifications,
+	traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor},
+	DigestItem, Justification, Justifications,
 };
 
 use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc};
@@ -122,7 +122,7 @@ pub struct BlockCheckParams<Block: BlockT> {
 /// Precomputed storage.
 pub enum StorageChanges<Block: BlockT, Transaction> {
 	/// Changes coming from block execution.
-	Changes(sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>),
+	Changes(sp_state_machine::StorageChanges<Transaction, HashFor<Block>>),
 	/// Whole new state.
 	Import(ImportedState<Block>),
 }
@@ -175,7 +175,7 @@ pub struct BlockImportParams<Block: BlockT, Transaction> {
 	pub justifications: Option<Justifications>,
 	/// Digest items that have been added after the runtime for external
 	/// work, like a consensus signature.
-	pub post_digests: Vec<DigestItemFor<Block>>,
+	pub post_digests: Vec<DigestItem>,
 	/// The body of the block.
 	pub body: Option<Vec<Block::Extrinsic>>,
 	/// Indexed transaction body of the block.
diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs
index 33a4c8616f6d2..4284d40179d2f 100644
--- a/client/consensus/manual-seal/src/consensus.rs
+++ b/client/consensus/manual-seal/src/consensus.rs
@@ -21,7 +21,7 @@
 use super::Error;
 use sc_consensus::BlockImportParams;
 use sp_inherents::InherentData;
-use sp_runtime::traits::{Block as BlockT, DigestFor};
+use sp_runtime::{traits::Block as BlockT, Digest};
 
 pub mod babe;
 
@@ -32,11 +32,7 @@ pub trait ConsensusDataProvider<B: BlockT>: Send + Sync {
 	type Transaction;
 
 	/// Attempt to create a consensus digest.
-	fn create_digest(
-		&self,
-		parent: &B::Header,
-		inherents: &InherentData,
-	) -> Result<DigestFor<B>, Error>;
+	fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result<Digest, Error>;
 
 	/// Set up the necessary import params.
fn append_block_import( diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 1d3afe392d62f..e06c544aaedc3 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -49,7 +49,8 @@ use sp_consensus_slots::Slot; use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ generic::{BlockId, Digest}, - traits::{Block as BlockT, DigestFor, DigestItemFor, Header, Zero}, + traits::{Block as BlockT, Header, Zero}, + DigestItem, }; use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER}; @@ -193,11 +194,7 @@ where { type Transaction = TransactionFor; - fn create_digest( - &self, - parent: &B::Header, - inherents: &InherentData, - ) -> Result, Error> { + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents .babe_inherent_data()? .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; @@ -207,7 +204,7 @@ where let logs = if let Some((predigest, _)) = authorship::claim_slot(slot, &epoch, &self.keystore) { - vec![ as CompatibleDigestItem>::babe_pre_digest(predigest)] + vec![::babe_pre_digest(predigest)] } else { // well we couldn't claim a slot because this is an existing chain and we're not in the // authorities. we need to tell BabeBlockImport that the epoch has changed, and we put @@ -244,13 +241,13 @@ where }); vec![ - DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()), - DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode()), + DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode()), + DigestItem::Consensus(BABE_ENGINE_ID, next_epoch.encode()), ] }, ViableEpochDescriptor::UnimportedGenesis(_) => { // since this is the genesis, secondary predigest works for now. 
- vec![DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode())] + vec![DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode())] }, } }; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 7b1012888e869..3ab0b977255ee 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -55,7 +55,7 @@ use sc_consensus::{ }; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; use sp_consensus::{ CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; @@ -240,7 +240,7 @@ where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, - C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, CAW: CanAuthorWith, @@ -319,7 +319,7 @@ where I: BlockImport> + Send + Sync, I::Error: Into, S: SelectChain, - C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm + Send + Sync, Algorithm::Difficulty: 'static + Send, @@ -425,10 +425,7 @@ impl PowVerifier { Self { algorithm, _marker: PhantomData } } - fn check_header( - &self, - mut header: B::Header, - ) -> Result<(B::Header, DigestItem), Error> + fn check_header(&self, mut header: B::Header) -> Result<(B::Header, DigestItem), Error> where Algorithm: PowAlgorithm, { @@ -630,7 +627,7 @@ where }, }; - let mut inherent_digest = Digest::::default(); + let mut inherent_digest = Digest::default(); if let Some(pre_runtime) = &pre_runtime { inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); } @@ -702,10 +699,7 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err } /// Fetch PoW seal. -fn fetch_seal( - digest: Option<&DigestItem>, - hash: B::Hash, -) -> Result, Error> { +fn fetch_seal(digest: Option<&DigestItem>, hash: B::Hash) -> Result, Error> { match digest { Some(DigestItem::Seal(id, seal)) => if id == &POW_ENGINE_ID { diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index bfaa388014ef0..ead209ef86a65 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -45,7 +45,7 @@ use sp_consensus_slots::Slot; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor}, + traits::{Block as BlockT, HashFor, Header as HeaderT}, }; use sp_timestamp::Timestamp; use std::{fmt::Debug, ops::Deref, time::Duration}; @@ -54,7 +54,7 @@ use std::{fmt::Debug, ops::Deref, time::Duration}; /// /// See [`sp_state_machine::StorageChanges`] for more information. pub type StorageChanges = - sp_state_machine::StorageChanges, NumberFor>; + sp_state_machine::StorageChanges>; /// The result of [`SlotWorker::on_slot`]. #[derive(Debug, Clone)] @@ -141,11 +141,7 @@ pub trait SimpleSlotWorker { fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} /// Return the pre digest data to include in a block authored with the given claim. 
- fn pre_digest_data( - &self, - slot: Slot, - claim: &Self::Claim, - ) -> Vec>; + fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; /// Returns a function which produces a `BlockImportParams`. fn block_import_params( diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs deleted file mode 100644 index 795cb8f901183..0000000000000 --- a/client/db/src/cache/list_cache.rs +++ /dev/null @@ -1,2351 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! List-based cache. -//! -//! Maintains several lists, containing nodes that are inserted whenever -//! cached value at new block differs from the value at previous block. -//! Example: -//! B1(a) <--- B2(b) <--- B3(b) <--- B4(c) -//! N1(b) <-------------- N2(c) -//! -//! There's single list for all finalized blocks and >= 0 lists for unfinalized -//! blocks. -//! When new non-final block is inserted (with value that differs from the value -//! at parent), it starts new unfinalized fork. -//! When new final block is inserted (with value that differs from the value at -//! parent), new entry is appended to the finalized fork. -//! When existing non-final block is finalized (with value that differs from the -//! value at parent), new entry is appended to the finalized fork AND unfinalized -//! fork is dropped. -//! -//! Entries from abandoned unfinalized forks (forks that are forking from block B -//! which is ascendant of the best finalized block) are deleted when block F with -//! number B.number (i.e. 'parallel' canon block) is finalized. -//! -//! Finalized entry E1 is pruned when block B is finalized so that: -//! EntryAt(B.number - prune_depth).points_to(E1) - -use std::collections::{BTreeMap, BTreeSet}; - -use log::warn; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_runtime::traits::{Block as BlockT, Bounded, CheckedSub, NumberFor, Zero}; - -use crate::cache::{ - list_entry::{Entry, StorageEntry}, - list_storage::{Metadata, Storage, StorageTransaction}, - CacheItemT, ComplexBlockId, EntryType, -}; - -/// Pruning strategy. -#[derive(Debug, Clone, Copy)] -pub enum PruningStrategy { - /// Prune entries when they're too far behind best finalized block. - ByDepth(N), - /// Do not prune old entries at all. - NeverPrune, -} - -/// List-based cache. -pub struct ListCache> { - /// Cache storage. - storage: S, - /// Pruning strategy. - pruning_strategy: PruningStrategy>, - /// Best finalized block. - best_finalized_block: ComplexBlockId, - /// Best finalized entry (if exists). - best_finalized_entry: Option>, - /// All unfinalized 'forks'. - unfinalized: Vec>, -} - -/// All possible list cache operations that could be performed after transaction is committed. 
-#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub enum CommitOperation { - /// New block is appended to the fork without changing the cached value. - AppendNewBlock(usize, ComplexBlockId), - /// New block is appended to the fork with the different value. - AppendNewEntry(usize, Entry), - /// New fork is added with the given head entry. - AddNewFork(Entry), - /// New block is finalized and possibly: - /// - new entry is finalized AND/OR - /// - some forks are destroyed - BlockFinalized(ComplexBlockId, Option>, BTreeSet), - /// When best block is reverted - contains the forks that have to be updated - /// (they're either destroyed, or their best entry is updated to earlier block). - BlockReverted(BTreeMap>>), -} - -/// A set of commit operations. -#[derive(Debug)] -pub struct CommitOperations { - operations: Vec>, -} - -/// Single fork of list-based cache. -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct Fork { - /// The best block of this fork. We do not save this field in the database to avoid - /// extra updates => it could be None after restart. It will be either filled when - /// the block is appended to this fork, or the whole fork will be abandoned when the - /// block from the other fork is finalized - best_block: Option>, - /// The head entry of this fork. - head: Entry, -} - -/// Outcome of Fork::try_append_or_fork. -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub enum ForkAppendResult { - /// New entry should be appended to the end of the fork. - Append, - /// New entry should be forked from the fork, starting with entry at given block. - Fork(ComplexBlockId), -} - -impl> ListCache { - /// Create new db list cache entry. - pub fn new( - storage: S, - pruning_strategy: PruningStrategy>, - best_finalized_block: ComplexBlockId, - ) -> ClientResult { - let (best_finalized_entry, unfinalized) = - storage.read_meta().and_then(|meta| read_forks(&storage, meta))?; - - Ok(ListCache { - storage, - pruning_strategy, - best_finalized_block, - best_finalized_entry, - unfinalized, - }) - } - - /// Get reference to the storage. - pub fn storage(&self) -> &S { - &self.storage - } - - /// Get unfinalized forks reference. - #[cfg(test)] - pub fn unfinalized(&self) -> &[Fork] { - &self.unfinalized - } - - /// Get value valid at block. - pub fn value_at_block( - &self, - at: &ComplexBlockId, - ) -> ClientResult, Option>, T)>> { - let head = if at.number <= self.best_finalized_block.number { - // if the block is older than the best known finalized block - // => we're should search for the finalized value - - // BUT since we're not guaranteeing to provide correct values for forks - // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? { - return Err(ClientError::NotInFinalizedChain) - } - - self.best_finalized_entry.as_ref() - } else if self.unfinalized.is_empty() { - // there are no unfinalized entries - // => we should search for the finalized value - self.best_finalized_entry.as_ref() - } else { - // there are unfinalized entries - // => find the fork containing given block and read from this fork - // IF there's no matching fork, ensure that this isn't a block from a fork that has - // forked behind the best finalized block and search at finalized fork - - match self.find_unfinalized_fork(&at)? 
{ - Some(fork) => Some(&fork.head), - None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) - if chain::is_connected_to_block( - &self.storage, - &at, - &best_finalized_entry.valid_from, - )? => - Some(best_finalized_entry), - _ => None, - }, - } - }; - - match head { - Some(head) => head - .search_best_before(&self.storage, at.number) - .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), - None => Ok(None), - } - } - - /// When new block is inserted into database. - /// - /// None passed as value means that the value has not changed since previous block. - pub fn on_block_insert>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - value: Option, - entry_type: EntryType, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations - .append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) - } - - /// When previously inserted block is finalized. - pub fn on_block_finalize>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?)) - } - - /// When block is reverted. - pub fn on_block_revert>( - &self, - tx: &mut Tx, - reverted_block: &ComplexBlockId, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?))) - } - - /// When transaction is committed. - pub fn on_transaction_commit(&mut self, ops: CommitOperations) { - for op in ops.operations { - match op { - CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index).expect( - "ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed", - ); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index).expect( - "ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed", - ); - fork.best_block = Some(entry.valid_from.clone()); - fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized - .push(Fork { best_block: Some(entry.valid_from.clone()), head: entry }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(forks) => { - for (fork_index, updated_fork) in forks.into_iter().rev() { - match updated_fork { - Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { - self.unfinalized.remove(fork_index); - }, - } - } - }, - } - } - } - - fn do_on_block_insert>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - value: Option, - entry_type: EntryType, - operations: &CommitOperations, - ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - let prev_operation = operations.operations.last(); - debug_assert!( - entry_type != EntryType::Final || - self.unfinalized.is_empty() || - self.best_finalized_block.hash == 
parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => - best_finalized_block.hash == parent.hash, - _ => false, - } - ); - - // we do not store any values behind finalized - if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { - return Ok(None) - } - - // if the block is not final, it is possibly appended to/forking from existing unfinalized - // fork - let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis; - if !is_final { - let mut fork_and_action = None; - - // when value hasn't changed and block isn't final, there's nothing we need to do - if value.is_none() { - return Ok(None) - } - - // first: try to find fork that is known to has the best block we're appending to - for (index, fork) in self.unfinalized.iter().enumerate() { - if fork.try_append(&parent) { - fork_and_action = Some((index, ForkAppendResult::Append)); - break - } - } - - // if not found, check cases: - // - we're appending to the fork for the first time after restart; - // - we're forking existing unfinalized fork from the middle; - if fork_and_action.is_none() { - let best_finalized_entry_block = - self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); - for (index, fork) in self.unfinalized.iter().enumerate() { - if let Some(action) = - fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? - { - fork_and_action = Some((index, action)); - break - } - } - } - - // if we have found matching unfinalized fork => early exit - match fork_and_action { - // append to unfinalized fork - Some((index, ForkAppendResult::Append)) => { - let new_storage_entry = match self.unfinalized[index].head.try_update(value) { - Some(new_storage_entry) => new_storage_entry, - None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))), - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = - CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); - tx.update_meta( - self.best_finalized_entry.as_ref(), - &self.unfinalized, - &operation, - ); - return Ok(Some(operation)) - }, - // fork from the middle of unfinalized fork - Some((_, ForkAppendResult::Fork(prev_valid_from))) => { - // it is possible that we're inserting extra (but still required) fork here - let new_storage_entry = StorageEntry { - prev_valid_from: Some(prev_valid_from), - value: value.expect("checked above that !value.is_none(); qed"), - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = - CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta( - self.best_finalized_entry.as_ref(), - &self.unfinalized, - &operation, - ); - return Ok(Some(operation)) - }, - None => (), - } - } - - // if we're here, then one of following is true: - // - either we're inserting final block => all ancestors are already finalized AND the only - // thing we can do is to try to update last finalized entry - // - either we're inserting non-final blocks that has no ancestors in any known unfinalized - // forks - - let new_storage_entry = match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) => best_finalized_entry.try_update(value), - None if value.is_some() => Some(StorageEntry { - prev_valid_from: None, - value: value.expect("value.is_some(); qed"), - }), - None => None, - }; - - if !is_final { - return Ok(match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let 
operation = - CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta( - self.best_finalized_entry.as_ref(), - &self.unfinalized, - &operation, - ); - Some(operation) - }, - None => None, - }) - } - - // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); - self.prune_finalized_entries(tx, &block); - - match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::BlockFinalized( - block.clone(), - Some(new_storage_entry.into_entry(block)), - abandoned_forks, - ); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - Ok(Some(operation)) - }, - None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))), - } - } - - fn do_on_block_finalize>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - operations: &CommitOperations, - ) -> ClientResult>> { - // this guarantee is currently provided by db backend && we're relying on it here - let prev_operation = operations.operations.last(); - debug_assert!( - self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => - best_finalized_block.hash == parent.hash, - _ => false, - } - ); - - // there could be at most one entry that is finalizing - let finalizing_entry = - self.storage.read_entry(&block)?.map(|entry| entry.into_entry(block.clone())); - - // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); - self.prune_finalized_entries(tx, &block); - - let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - - Ok(Some(operation)) - } - - fn do_on_block_revert>( - &self, - tx: &mut Tx, - reverted_block: &ComplexBlockId, - ) -> ClientResult> { - // can't revert finalized blocks - debug_assert!(self.best_finalized_block.number < reverted_block.number); - - // iterate all unfinalized forks and truncate/destroy if required - let mut updated = BTreeMap::new(); - for (index, fork) in self.unfinalized.iter().enumerate() { - // we only need to truncate fork if its head is ancestor of truncated block - if fork.head.valid_from.number < reverted_block.number { - continue - } - - // we only need to truncate fork if its head is connected to truncated block - if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? - { - continue - } - - let updated_fork = fork.truncate( - &self.storage, - tx, - reverted_block.number, - self.best_finalized_block.number, - )?; - updated.insert(index, updated_fork); - } - - // schedule commit operation and update meta - let operation = CommitOperation::BlockReverted(updated); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - - Ok(operation) - } - - /// Prune old finalized entries. 
- fn prune_finalized_entries>( - &self, - tx: &mut Tx, - block: &ComplexBlockId, - ) { - let prune_depth = match self.pruning_strategy { - PruningStrategy::ByDepth(prune_depth) => prune_depth, - PruningStrategy::NeverPrune => return, - }; - - let mut do_pruning = || -> ClientResult<()> { - // calculate last ancient block number - let ancient_block = match block.number.checked_sub(&prune_depth) { - Some(number) => match self.storage.read_id(number)? { - Some(hash) => ComplexBlockId::new(hash, number), - None => return Ok(()), - }, - None => return Ok(()), - }; - - // if there's an entry at this block: - // - remove reference from this entry to the previous entry - // - destroy fork starting with previous entry - let current_entry = match self.storage.read_entry(&ancient_block)? { - Some(current_entry) => current_entry, - None => return Ok(()), - }; - let first_entry_to_truncate = match current_entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - - // truncate ancient entry - tx.insert_storage_entry( - &ancient_block, - &StorageEntry { prev_valid_from: None, value: current_entry.value }, - ); - - // destroy 'fork' ending with previous entry - destroy_fork(first_entry_to_truncate, &self.storage, tx, None) - }; - - if let Err(error) = do_pruning() { - warn!(target: "db", "Failed to prune ancient cache entries: {}", error); - } - } - - /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized. - fn destroy_abandoned_forks>( - &self, - tx: &mut Tx, - block: &ComplexBlockId, - prev_operation: Option<&CommitOperation>, - ) -> BTreeSet { - // if some block has been finalized already => take it into account - let prev_abandoned_forks = match prev_operation { - Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => - Some(abandoned_forks), - _ => None, - }; - - let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); - let live_unfinalized = self.unfinalized.iter().enumerate().filter(|(idx, _)| { - prev_abandoned_forks - .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) - .unwrap_or(true) - }); - for (index, fork) in live_unfinalized { - if fork.head.valid_from.number == block.number { - destroyed.insert(index); - if fork.head.valid_from.hash != block.hash { - if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) { - warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error); - } - } - } - } - - destroyed - } - - /// Search unfinalized fork where given block belongs. - fn find_unfinalized_fork( - &self, - block: &ComplexBlockId, - ) -> ClientResult>> { - for unfinalized in &self.unfinalized { - if unfinalized.matches(&self.storage, block)? { - return Ok(Some(&unfinalized)) - } - } - - Ok(None) - } -} - -impl Fork { - /// Get reference to the head entry of this fork. - pub fn head(&self) -> &Entry { - &self.head - } - - /// Check if the block is the part of the fork. - pub fn matches>( - &self, - storage: &S, - block: &ComplexBlockId, - ) -> ClientResult { - let range = self.head.search_best_range_before(storage, block.number)?; - match range { - None => Ok(false), - Some((begin, end)) => - chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), - } - } - - /// Try to append NEW block to the fork. This method will only 'work' (return true) when block - /// is actually appended to the fork AND the best known block of the fork is known (i.e. 
some - /// block has been already appended to this fork after last restart). - pub fn try_append(&self, parent: &ComplexBlockId) -> bool { - // when the best block of the fork is known, the check is trivial - // - // most of calls will hopefully end here, because best_block is only unknown - // after restart and until new block is appended to the fork - self.best_block.as_ref() == Some(parent) - } - - /// Try to append new block to the fork OR fork it. - pub fn try_append_or_fork>( - &self, - storage: &S, - parent: &ComplexBlockId, - best_finalized_entry_block: Option>, - ) -> ClientResult>> { - // try to find entries that are (possibly) surrounding the parent block - let range = self.head.search_best_range_before(storage, parent.number)?; - let begin = match range { - Some((begin, _)) => begin, - None => return Ok(None), - }; - - // check if the parent is connected to the beginning of the range - if !chain::is_connected_to_block(storage, parent, &begin)? { - return Ok(None) - } - - // the block is connected to the begin-entry. If begin is the head entry - // => we need to append new block to the fork - if begin == self.head.valid_from { - return Ok(Some(ForkAppendResult::Append)) - } - - // the parent block belongs to this fork AND it is located after last finalized entry - // => we need to make a new fork - if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) { - return Ok(Some(ForkAppendResult::Fork(begin))) - } - - Ok(None) - } - - /// Destroy fork by deleting all unfinalized entries. - pub fn destroy, Tx: StorageTransaction>( - &self, - storage: &S, - tx: &mut Tx, - best_finalized_block: Option>, - ) -> ClientResult<()> { - destroy_fork(self.head.valid_from.clone(), storage, tx, best_finalized_block) - } - - /// Truncate fork by deleting all entries that are descendants of given block. - pub fn truncate, Tx: StorageTransaction>( - &self, - storage: &S, - tx: &mut Tx, - reverting_block: NumberFor, - best_finalized_block: NumberFor, - ) -> ClientResult>> { - let mut current = self.head.valid_from.clone(); - loop { - // read pointer to previous entry - let entry = storage.require_entry(¤t)?; - - // truncation stops when we have reached the ancestor of truncated block - if current.number < reverting_block { - // if we have reached finalized block => destroy fork - if chain::is_finalized_block(storage, ¤t, best_finalized_block)? { - return Ok(None) - } - - // else fork needs to be updated - return Ok(Some(Fork { best_block: None, head: entry.into_entry(current) })) - } - - tx.remove_storage_entry(¤t); - - // truncation also stops when there are no more entries in the list - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(None), - }; - } - } -} - -impl Default for CommitOperations { - fn default() -> Self { - CommitOperations { operations: Vec::new() } - } -} - -// This should never be allowed for non-test code to avoid revealing its internals. -#[cfg(test)] -impl From>> - for CommitOperations -{ - fn from(operations: Vec>) -> Self { - CommitOperations { operations } - } -} - -impl CommitOperations { - /// Append operation to the set. 
- fn append(&mut self, new_operation: Option>) { - let new_operation = match new_operation { - Some(new_operation) => new_operation, - None => return, - }; - - let last_operation = match self.operations.pop() { - Some(last_operation) => last_operation, - None => { - self.operations.push(new_operation); - return - }, - }; - - // we are able (and obliged to) to merge two consequent block finalization operations - match last_operation { - CommitOperation::BlockFinalized( - old_finalized_block, - old_finalized_entry, - old_abandoned_forks, - ) => match new_operation { - CommitOperation::BlockFinalized( - new_finalized_block, - new_finalized_entry, - new_abandoned_forks, - ) => { - self.operations.push(CommitOperation::BlockFinalized( - new_finalized_block, - new_finalized_entry, - new_abandoned_forks, - )); - }, - _ => { - self.operations.push(CommitOperation::BlockFinalized( - old_finalized_block, - old_finalized_entry, - old_abandoned_forks, - )); - self.operations.push(new_operation); - }, - }, - _ => { - self.operations.push(last_operation); - self.operations.push(new_operation); - }, - } - } -} - -/// Destroy fork by deleting all unfinalized entries. -pub fn destroy_fork< - Block: BlockT, - T: CacheItemT, - S: Storage, - Tx: StorageTransaction, ->( - head_valid_from: ComplexBlockId, - storage: &S, - tx: &mut Tx, - best_finalized_block: Option>, -) -> ClientResult<()> { - let mut current = head_valid_from; - loop { - // optionally: deletion stops when we found entry at finalized block - if let Some(best_finalized_block) = best_finalized_block { - if chain::is_finalized_block(storage, ¤t, best_finalized_block)? { - return Ok(()) - } - } - - // read pointer to previous entry - let entry = storage.require_entry(¤t)?; - tx.remove_storage_entry(¤t); - - // deletion stops when there are no more entries in the list - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - } -} - -/// Blockchain related functions. -mod chain { - use super::*; - use sp_runtime::traits::Header as HeaderT; - - /// Is the block1 connected both ends of the range. - pub fn is_connected_to_range>( - storage: &S, - block: &ComplexBlockId, - range: (&ComplexBlockId, Option<&ComplexBlockId>), - ) -> ClientResult { - let (begin, end) = range; - Ok(is_connected_to_block(storage, block, begin)? && - match end { - Some(end) => is_connected_to_block(storage, block, end)?, - None => true, - }) - } - - /// Is the block1 directly connected (i.e. part of the same fork) to block2? - pub fn is_connected_to_block>( - storage: &S, - block1: &ComplexBlockId, - block2: &ComplexBlockId, - ) -> ClientResult { - let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; - let mut current = storage - .read_header(&end.hash)? - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; - while *current.number() > begin.number { - current = storage - .read_header(current.parent_hash())? - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; - } - - Ok(begin.hash == current.hash()) - } - - /// Returns true if the given block is finalized. - pub fn is_finalized_block>( - storage: &S, - block: &ComplexBlockId, - best_finalized_block: NumberFor, - ) -> ClientResult { - if block.number > best_finalized_block { - return Ok(false) - } - - storage.read_id(block.number).map(|hash| hash.as_ref() == Some(&block.hash)) - } -} - -/// Read list cache forks at blocks IDs. 
-fn read_forks>( - storage: &S, - meta: Metadata, -) -> ClientResult<(Option>, Vec>)> { - let finalized = match meta.finalized { - Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)), - None => None, - }; - - let unfinalized = meta - .unfinalized - .into_iter() - .map(|unfinalized| { - storage.require_entry(&unfinalized).map(|storage_entry| Fork { - best_block: None, - head: storage_entry.into_entry(unfinalized), - }) - }) - .collect::>()?; - - Ok((finalized, unfinalized)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; - use substrate_test_runtime_client::runtime::H256; - - type Block = RawBlock>; - - fn test_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(H256::from_low_u64_be(number), number) - } - - fn correct_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(test_header(number).hash(), number) - } - - fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId { - ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number) - } - - fn test_header(number: u64) -> Header { - Header { - parent_hash: if number == 0 { - Default::default() - } else { - test_header(number - 1).hash() - }, - number, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header { - if fork_from == number { - test_header(number) - } else { - Header { - parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(), - number, - state_root: H256::from_low_u64_be(1 + fork_nonce), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - } - - #[test] - fn list_value_at_block_works() { - // when block is earlier than best finalized block AND it is not finalized - // --- 50 --- - // ----------> [100] - assert!(ListCache::<_, u64, _>::new( - DummyStorage::new(), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&test_id(50)) - .is_err()); - // when block is earlier than best finalized block AND it is finalized AND value is some - // [30] ---- 50 ---> [100] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&test_id(50)) - .unwrap(), - Some((test_id(30), Some(test_id(100)), 30)) - ); - // when block is the best finalized block AND value is some - // ---> [100] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(100, H256::from_low_u64_be(100)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&test_id(100)) - .unwrap(), - Some((test_id(100), None, 100)) - ); - // when block is parallel to the best finalized block - // ---- 100 - // ---> [100] - assert!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - 
.with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) - .is_err()); - - // when block is later than last finalized block AND there are no forks AND finalized value - // is Some ---> [100] --- 200 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&test_id(200)) - .unwrap(), - Some((test_id(100), None, 100)) - ); - - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is Some - // --- 3 - // ---> [2] /---------> [4] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 2, 3)) - .unwrap(), - Some((correct_id(2), None, 2)) - ); - // when block is later than last finalized block AND there are no matching forks - // AND block is not connected to finalized block - // --- 2 --- 3 - // 1 /---> [2] ---------> [4] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 1, 3)) - .unwrap(), - None - ); - - // when block is later than last finalized block AND it appends to unfinalized fork from the - // end AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&correct_id(5)) - .unwrap(), - Some((correct_id(4), None, 4)) - ); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_entry(correct_id(2), StorageEntry { 
prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 2, 3)) - .unwrap(), - Some((correct_id(2), None, 2)) - ); - } - - #[test] - fn list_on_block_insert_works() { - let nfin = EntryType::NonFinal; - let fin = EntryType::Final; - - // when trying to insert block < finalized number - let mut ops = Default::default(); - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(49), - test_id(50), - Some(50), - nfin, - &mut ops, - ) - .unwrap() - .is_none()); - // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(99), - test_id(100), - Some(100), - nfin, - &Default::default(), - ) - .unwrap() - .is_none()); - - // when trying to insert non-final block AND it appends to the best block of unfinalized - // fork AND new value is the same as in the fork' best block - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![test_id(4)]) - .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - PruningStrategy::ByDepth(1024), - test_id(2), - ) - .unwrap(); - cache.unfinalized[0].best_block = Some(test_id(4)); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - test_id(4), - test_id(5), - Some(4), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized - // fork AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - test_id(4), - test_id(5), - Some(5), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }) - ); - - // when trying to insert non-final block AND it is the first block that appends to the best - // block of unfinalized fork AND new value is the same as in the fork' best block - let cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), - test_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(4), - nfin, - &Default::default(), - ) - .unwrap(), - Some(CommitOperation::AppendNewBlock(0, correct_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it is the first block that appends to the best - // block of unfinalized fork AND new value is the same 
as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(5), - nfin, - &Default::default(), - ) - .unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }) - ); - - // when trying to insert non-final block AND it forks unfinalized fork - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }, - ) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(3), - fork_id(0, 3, 4), - Some(14), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), - ); - assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { - finalized: Some(correct_id(2)), - unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] - }) - ); - - // when trying to insert non-final block AND there are no unfinalized forks - // AND value is the same as last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - nfin, - &Default::default() - ) - .unwrap(), - None, - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND there are no unfinalized forks - // AND value differs from last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(3), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }) - ); - - // when inserting finalized entry AND there are no previous finalized entries - let cache = - ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - 
correct_id(3), - Some(3), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) - ); - // when inserting finalized entry AND value is the same as in previous finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when inserting finalized entry AND value differs from previous finalized - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(3), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) - ); - - // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - None, - vec![0].into_iter().collect() - )), - ); - } - - #[test] - fn list_on_block_finalized_works() { - // finalization does not finalizes entry if it does not exists - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }), - ); - // finalization finalizes entry - let 
cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(4), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(5), - Some(Entry { valid_from: correct_id(5), value: 5 }), - vec![0].into_iter().collect(), - )), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }) - ); - // finalization removes abandoned forks - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - None, - vec![0].into_iter().collect() - )), - ); - } - - #[test] - fn list_transaction_commit_works() { - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ) - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - - // when new block is appended to unfinalized fork - cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); - // when new entry is appended to unfinalized fork - cache.on_transaction_commit( - vec![CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })] - .into(), - ); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); - assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); - // when new fork is added - cache.on_transaction_commit( - vec![CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })] - .into(), - ); - assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); - assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); - // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit( - vec![CommitOperation::BlockFinalized( - correct_id(20), - Some(Entry { valid_from: correct_id(20), value: 20 }), - vec![0, 1, 2].into_iter().collect(), - )] - .into(), - ); - assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!( - cache.best_finalized_entry, - Some(Entry { valid_from: correct_id(20), value: 20 }) - ); - assert!(cache.unfinalized.is_empty()); - } - - #[test] - fn list_find_unfinalized_fork_works() { - // ----------> [3] - // --- [2] ---------> 4 ---> [5] - 
assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&correct_id(4)).into()) - .unwrap() - .unwrap() - .head - .valid_from, - correct_id(5) - ); - // --- [2] ---------------> [5] - // ----------> [3] ---> 4 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry( - correct_id(2), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&fork_id(0, 1, 4)).into()) - .unwrap() - .unwrap() - .head - .valid_from, - fork_id(0, 1, 3) - ); - // --- [2] ---------------> [5] - // ----------> [3] - // -----------------> 4 - assert!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry( - correct_id(2), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)) - .with_header(fork_header(1, 1, 2)) - .with_header(fork_header(1, 1, 3)) - .with_header(fork_header(1, 1, 4)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&fork_id(1, 1, 4)).into()) - .unwrap() - .is_none()); - } - - #[test] - fn fork_matches_works() { - // when block is not within list range - let storage = DummyStorage::new() - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, (&test_id(20)).into()) - .unwrap(), - false - ); - // when block is not connected to the begin block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry 
{ valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&fork_id(0, 2, 4)).into()) - .unwrap(), - false - ); - // when block is not connected to the end block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 3, 4)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&fork_id(0, 3, 4)).into()) - .unwrap(), - false - ); - // when block is connected to the begin block AND end is open - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) - .with_header(test_header(5)) - .with_header(test_header(6)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&correct_id(6)).into()) - .unwrap(), - true - ); - // when block is connected to the begin block AND to the end block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&correct_id(4)).into()) - .unwrap(), - true - ); - } - - #[test] - fn fork_try_append_works() { - // when best block is unknown - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), - false - ); - // when best block is known but different - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(101)), - false - ); - // when best block is known and the same - assert_eq!( - Fork::<_, u64> { - best_block: Some(test_id(100)), - head: Entry { valid_from: test_id(100), value: 0 } - } - .try_append(&test_id(100)), - true - ); - } - - #[test] - fn fork_try_append_or_fork_works() { - // when there's no entry before parent - let storage = DummyStorage::new() - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append_or_fork(&storage, &test_id(30), None) - .unwrap(), - None - ); - // when parent does not belong to the fork - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) - .unwrap(), - None - ); - // when the entry before parent is the head entry - let storage = DummyStorage::new() - 
.with_entry( - ComplexBlockId::new(test_header(5).hash(), 5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_header(test_header(6)) - .with_header(test_header(5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .try_append_or_fork(&storage, &correct_id(6), None) - .unwrap(), - Some(ForkAppendResult::Append) - ); - // when the parent located after last finalized entry - let storage = DummyStorage::new() - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(6), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), None) - .unwrap(), - Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))) - ); - // when the parent located before last finalized entry - let storage = DummyStorage::new() - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(6), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)) - .unwrap(), - None - ); - } - - #[test] - fn fork_destroy_works() { - // when we reached finalized entry without iterations - let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert!(tx.removed_entries().is_empty()); - // when we reach finalized entry with iterations - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 }) - .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: 5 }) - .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert_eq!( - *tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash, test_id(20).hash] - .into_iter() - .collect() - ); - // when we reach beginning of fork before finalized block - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: 
Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert_eq!( - *tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash].into_iter().collect() - ); - } - - #[test] - fn is_connected_to_block_fails() { - // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>( - &FaultyStorage, - (&test_id(1)).into(), - &test_id(100), - ) - .is_err(),); - // when there's no header in the storage - assert!(chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new(), - (&test_id(1)).into(), - &test_id(100), - ) - .is_err(),); - } - - #[test] - fn is_connected_to_block_works() { - // when without iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new().with_header(test_header(1)), - (&test_id(1)).into(), - &correct_id(1) - ) - .unwrap(), - false - ); - // when with ASC iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&test_id(0)).into(), - &correct_id(2) - ) - .unwrap(), - false - ); - // when with DESC iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), - &test_id(0) - ) - .unwrap(), - false - ); - // when without iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new().with_header(test_header(1)), - (&correct_id(1)).into(), - &correct_id(1) - ) - .unwrap(), - true - ); - // when with ASC iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(0)).into(), - &correct_id(2) - ) - .unwrap(), - true - ); - // when with DESC iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), - &correct_id(0) - ) - .unwrap(), - true - ); - } - - #[test] - fn is_finalized_block_fails() { - // when storage returns error - assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); - } - - #[test] - fn is_finalized_block_works() { - // when number of block is larger than last finalized block - assert_eq!( - chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), - false - ); - // when there's no hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), - false - ); - // when there's different hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>( - &DummyStorage::new().with_id(1, H256::from_low_u64_be(2)), - &test_id(1), - 100 - ) - .unwrap(), - false - ); - // when there's the same hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>( - &DummyStorage::new().with_id(1, H256::from_low_u64_be(1)), - &test_id(1), - 100 - ) - .unwrap(), - true - ); - } - - #[test] - fn read_forks_fails() { - // when storage returns error during finalized entry read - 
assert!(read_forks::( - &FaultyStorage, - Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } - ) - .is_err()); - // when storage returns error during unfinalized entry read - assert!(read_forks::( - &FaultyStorage, - Metadata { finalized: None, unfinalized: vec![test_id(1)] } - ) - .is_err()); - // when finalized entry is not found - assert!(read_forks::( - &DummyStorage::new(), - Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } - ) - .is_err()); - // when unfinalized entry is not found - assert!(read_forks::( - &DummyStorage::new(), - Metadata { finalized: None, unfinalized: vec![test_id(1)] } - ) - .is_err()); - } - - #[test] - fn read_forks_works() { - let storage = DummyStorage::new() - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: 11 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: 0 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 33 }); - let expected = ( - Some(Entry { valid_from: test_id(10), value: 11 }), - vec![ - Fork { best_block: None, head: Entry { valid_from: test_id(20), value: 0 } }, - Fork { best_block: None, head: Entry { valid_from: test_id(30), value: 33 } }, - ], - ); - - assert_eq!( - expected, - read_forks( - &storage, - Metadata { - finalized: Some(test_id(10)), - unfinalized: vec![test_id(20), test_id(30)], - } - ) - .unwrap() - ); - } - - #[test] - fn ancient_entries_are_pruned_when_pruning_enabled() { - fn do_test(strategy: PruningStrategy) { - let cache = ListCache::new( - DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30, H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) - .with_entry( - test_id(20), - StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }, - ) - .with_entry( - test_id(30), - StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }, - ), - strategy, - test_id(9), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - - // when finalizing entry #10: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(10)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #19: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(19)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #20: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(20)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is - // enabled) - cache.prune_finalized_entries(&mut tx, &test_id(30)); - match strategy { - PruningStrategy::NeverPrune => { - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - }, - PruningStrategy::ByDepth(_) => { - assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!( - *tx.inserted_entries(), - vec![test_id(20).hash].into_iter().collect() - ); - }, - } - } - - do_test(PruningStrategy::ByDepth(10)); - do_test(PruningStrategy::NeverPrune) - } - - #[test] - fn revert_block_works() { - // 1 -> (2) -> 3 -> 4 -> 5 - // \ - // -> 5'' - // \ - // -> (3') -> 4' -> 5' - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta( - Some(correct_id(1)), - vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)], - ) - .with_id(1, 
correct_id(1).hash) - .with_entry(correct_id(1), StorageEntry { prev_valid_from: None, value: 1 }) - .with_entry( - correct_id(3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 }, - ) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 }, - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 }, - ) - .with_entry( - fork_id(1, 2, 4), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 }, - ) - .with_entry( - fork_id(1, 2, 5), - StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 }, - ) - .with_entry( - fork_id(2, 4, 5), - StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 }, - ) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(1, 2, 3)) - .with_header(fork_header(1, 2, 4)) - .with_header(fork_header(1, 2, 5)) - .with_header(fork_header(2, 4, 5)), - PruningStrategy::ByDepth(1024), - correct_id(1), - ) - .unwrap(); - - // when 5 is reverted: entry 5 is truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); - assert_eq!( - op, - CommitOperation::BlockReverted( - vec![( - 0, - Some(Fork { - best_block: None, - head: Entry { valid_from: correct_id(4), value: 4 } - }) - ),] - .into_iter() - .collect() - ) - ); - cache.on_transaction_commit(vec![op].into()); - - // when 3 is reverted: entries 4+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); - assert_eq!( - op, - CommitOperation::BlockReverted(vec![(0, None), (2, None),].into_iter().collect()) - ); - cache.on_transaction_commit(vec![op].into()); - - // when 2 is reverted: entries 4'+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![(0, None),].into_iter().collect())); - cache.on_transaction_commit(vec![op].into()); - } - - #[test] - fn append_commit_operation_works() { - let mut ops = CommitOperations::default(); - ops.append(None); - assert_eq!(ops.operations, Vec::new()); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - ))); - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - )], - ); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - ))); - - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - )], - ); - } -} diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs deleted file mode 100644 index 7cee7a5146260..0000000000000 --- a/client/db/src/cache/list_entry.rs +++ /dev/null @@ -1,187 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
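// The file below implements the cache's backward-linked entry list. A minimal
// standalone sketch of the idea (toy types and names, not the actual API):
// each entry records the block it became valid from plus a link to the
// previous entry, and a lookup walks backwards until it reaches an entry
// whose `valid_from` is at or below the queried block number.
#[derive(Clone)]
struct ToyEntry {
    valid_from: u64,     // block number at which this value became actual
    prev: Option<usize>, // index of the previous entry, if any
    value: u32,
}

// Walk the chain starting at `head` until an entry valid at `block` is found.
fn toy_search_best_before(entries: &[ToyEntry], head: usize, block: u64) -> Option<u32> {
    let mut current = Some(head);
    while let Some(idx) = current {
        if block >= entries[idx].valid_from {
            return Some(entries[idx].value)
        }
        current = entries[idx].prev; // travel back in time
    }
    None // the queried block predates the first entry
}

fn main() {
    // value 1 from block 0, value 2 from block 50, value 3 from block 100
    let entries = vec![
        ToyEntry { valid_from: 0, prev: None, value: 1 },
        ToyEntry { valid_from: 50, prev: Some(0), value: 2 },
        ToyEntry { valid_from: 100, prev: Some(1), value: 3 },
    ];
    assert_eq!(toy_search_best_before(&entries, 2, 75), Some(2));
    assert_eq!(toy_search_best_before(&entries, 2, 150), Some(3));
}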
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! List-cache storage entries.
-
-use codec::{Decode, Encode};
-use sp_blockchain::Result as ClientResult;
-use sp_runtime::traits::{Block as BlockT, NumberFor};
-
-use crate::cache::{list_storage::Storage, CacheItemT, ComplexBlockId};
-
-/// Single list-based cache entry.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct Entry<Block: BlockT, T> {
-    /// first block, when this value became actual.
-    pub valid_from: ComplexBlockId<Block>,
-    /// Value stored at this entry.
-    pub value: T,
-}
-
-/// Internal representation of the single list-based cache entry. The entry points to the
-/// previous entry in the cache, allowing us to traverse back in time in list-style.
-#[derive(Debug, Encode, Decode)]
-#[cfg_attr(test, derive(Clone, PartialEq))]
-pub struct StorageEntry<Block: BlockT, T> {
-    /// None if valid from the beginning.
-    pub prev_valid_from: Option<ComplexBlockId<Block>>,
-    /// Value stored at this entry.
-    pub value: T,
-}
-
-impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
-    /// Returns Some if the entry should be updated with the new value.
-    pub fn try_update(&self, value: Option<T>) -> Option<StorageEntry<Block, T>> {
-        match value {
-            Some(value) => match self.value == value {
-                true => None,
-                false =>
-                    Some(StorageEntry { prev_valid_from: Some(self.valid_from.clone()), value }),
-            },
-            None => None,
-        }
-    }
-
-    /// Wrapper that calls search_before to get range where the given block fits.
-    pub fn search_best_range_before<S: Storage<Block, T>>(
-        &self,
-        storage: &S,
-        block: NumberFor<Block>,
-    ) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
-        Ok(self
-            .search_best_before(storage, block)?
-            .map(|(entry, next)| (entry.valid_from, next)))
-    }
-
-    /// Searches the list, ending with THIS entry for the best entry preceding (or at)
-    /// given block number.
-    /// If the entry is found, result is the entry and the block id of next entry (if exists).
-    /// NOTE that this function does not check that the passed block is actually linked to
-    /// the blocks it found.
-    pub fn search_best_before<S: Storage<Block, T>>(
-        &self,
-        storage: &S,
-        block: NumberFor<Block>,
-    ) -> ClientResult<Option<(Entry<Block, T>, Option<ComplexBlockId<Block>>)>> {
-        // we're looking for the best value
-        let mut next = None;
-        let mut current = self.valid_from.clone();
-        if block >= self.valid_from.number {
-            let value = self.value.clone();
-            return Ok(Some((Entry { valid_from: current, value }, next)))
-        }
-
-        // else - travel back in time
-        loop {
-            let entry = storage.require_entry(&current)?;
-            if block >= current.number {
-                return Ok(Some((Entry { valid_from: current, value: entry.value }, next)))
-            }
-
-            next = Some(current);
-            current = match entry.prev_valid_from {
-                Some(prev_valid_from) => prev_valid_from,
-                None => return Ok(None),
-            };
-        }
-    }
-}
-
-impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> {
-    /// Converts storage entry into an entry, valid from given block.
-    pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
-        Entry { valid_from, value: self.value }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
-    use substrate_test_runtime_client::runtime::{Block, H256};
-
-    fn test_id(number: u64) -> ComplexBlockId<Block> {
-        ComplexBlockId::new(H256::from_low_u64_be(number), number)
-    }
-
-    #[test]
-    fn entry_try_update_works() {
-        // when trying to update with None value
-        assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: 42 }.try_update(None), None);
-        // when trying to update with the same Some value
-        assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None);
-        // when trying to update with different Some value
-        assert_eq!(
-            Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
-            Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })
-        );
-    }
-
-    #[test]
-    fn entry_search_best_before_fails() {
-        // when storage returns error
-        assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 }
-            .search_best_before(&FaultyStorage, 50)
-            .is_err());
-    }
-
-    #[test]
-    fn entry_search_best_before_works() {
-        // when block is better than our best block
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(&DummyStorage::new(), 150)
-                .unwrap(),
-            Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))
-        );
-        // when block is found between two entries
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(
-                    &DummyStorage::new()
-                        .with_entry(
-                            test_id(100),
-                            StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
-                        )
-                        .with_entry(
-                            test_id(50),
-                            StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }
-                        ),
-                    75
-                )
-                .unwrap(),
-            Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))
-        );
-        // when block is not found
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(
-                    &DummyStorage::new()
-                        .with_entry(
-                            test_id(100),
-                            StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
-                        )
-                        .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
-                    30
-                )
-                .unwrap(),
-            None
-        );
-    }
-}
diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs
deleted file mode 100644
index bb47b8dab5a7f..0000000000000
--- a/client/db/src/cache/list_storage.rs
+++ /dev/null
@@ -1,441 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! List-cache storage definition and implementation.
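// A minimal standalone sketch (toy types, not the traits defined below) of the
// read/write split this module is built around: reads go through a trait,
// while writes are buffered in a transaction value and applied atomically on
// commit. This split is also what makes the in-memory test doubles used above
// possible.
use std::collections::HashMap;

trait ToyRead {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
}

#[derive(Default)]
struct ToyTransaction {
    // `None` marks a pending deletion of the key.
    writes: Vec<(Vec<u8>, Option<Vec<u8>>)>,
}

impl ToyTransaction {
    fn set(&mut self, key: &[u8], value: &[u8]) {
        self.writes.push((key.to_vec(), Some(value.to_vec())));
    }

    fn remove(&mut self, key: &[u8]) {
        self.writes.push((key.to_vec(), None));
    }
}

#[derive(Default)]
struct ToyDb(HashMap<Vec<u8>, Vec<u8>>);

impl ToyDb {
    // Apply all buffered writes at once; nothing is visible before commit.
    fn commit(&mut self, tx: ToyTransaction) {
        for (key, value) in tx.writes {
            match value {
                Some(value) => {
                    self.0.insert(key, value);
                },
                None => {
                    self.0.remove(&key);
                },
            }
        }
    }
}

impl ToyRead for ToyDb {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }
}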
- -use std::sync::Arc; - -use crate::utils::{self, meta_keys}; -use codec::{Decode, Encode}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_database::{Database, Transaction}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; - -use crate::{ - cache::{ - list_cache::{CommitOperation, Fork}, - list_entry::{Entry, StorageEntry}, - CacheItemT, ComplexBlockId, - }, - DbHash, -}; - -/// Single list-cache metadata. -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct Metadata { - /// Block at which best finalized entry is stored. - pub finalized: Option>, - /// A set of blocks at which best unfinalized entries are stored. - pub unfinalized: Vec>, -} - -/// Readonly list-cache storage trait. -pub trait Storage { - /// Reads hash of the block at given number. - fn read_id(&self, at: NumberFor) -> ClientResult>; - - /// Reads header of the block with given hash. - fn read_header(&self, at: &Block::Hash) -> ClientResult>; - - /// Reads cache metadata: best finalized entry (if some) and the list. - fn read_meta(&self) -> ClientResult>; - - /// Reads cache entry from the storage. - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>>; - - /// Reads referenced (and thus existing) cache entry from the storage. - fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { - self.read_entry(at).and_then(|entry| { - entry.ok_or_else(|| { - ClientError::from(ClientError::Backend(format!( - "Referenced cache entry at {:?} is not found", - at - ))) - }) - }) - } -} - -/// List-cache storage transaction. -pub trait StorageTransaction { - /// Insert storage entry at given block. - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); - - /// Delete storage entry at given block. - fn remove_storage_entry(&mut self, at: &ComplexBlockId); - - /// Update metadata of the cache. - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ); -} - -/// A set of columns used by the DbStorage. -#[derive(Debug)] -pub struct DbColumns { - /// Column holding cache meta. - pub meta: u32, - /// Column holding the mapping of { block number => block hash } for blocks of the best chain. - pub key_lookup: u32, - /// Column holding the mapping of { block hash => block header }. - pub header: u32, - /// Column holding cache entries. - pub cache: u32, -} - -/// Database-backed list cache storage. -pub struct DbStorage { - name: Vec, - meta_key: Vec, - db: Arc>, - columns: DbColumns, -} - -impl DbStorage { - /// Create new database-backed list cache storage. - pub fn new(name: Vec, db: Arc>, columns: DbColumns) -> Self { - let meta_key = meta::key(&name); - DbStorage { name, meta_key, db, columns } - } - - /// Get reference to the database. - pub fn db(&self) -> &Arc> { - &self.db - } - - /// Get reference to the database columns. - pub fn columns(&self) -> &DbColumns { - &self.columns - } - - /// Encode block id for storing as a key in cache column. - /// We append prefix to the actual encoding to allow several caches - /// store entries in the same column. 
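// The concatenation described above is collision-free because block hashes
// have a fixed length, so distinct cache names can never produce overlapping
// keys. A toy illustration (illustrative names and values, not the real
// DbStorage):
//
//     fn prefixed_key(cache_name: &[u8], block_hash: &[u8]) -> Vec<u8> {
//         let mut key = cache_name.to_vec();
//         key.extend_from_slice(block_hash);
//         key
//     }
//
//     // b"auth" and b"epoch" caches sharing one column stay disjoint:
//     assert_eq!(prefixed_key(b"auth", &[0xab; 32]).len(), 4 + 32);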
- pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { - let mut encoded = self.name.clone(); - encoded.extend(block.hash.as_ref()); - encoded - } -} - -impl Storage for DbStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_header::( - &*self.db, - self.columns.key_lookup, - self.columns.header, - BlockId::Number(at), - ) - .map(|maybe_header| maybe_header.map(|header| header.hash())) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - utils::read_header::( - &*self.db, - self.columns.key_lookup, - self.columns.header, - BlockId::Hash(*at), - ) - } - - fn read_meta(&self) -> ClientResult> { - match self.db.get(self.columns.meta, &self.meta_key) { - Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { finalized: None, unfinalized: Vec::new() }), - } - } - - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>> { - match self.db.get(self.columns.cache, &self.encode_block_id(at)) { - Some(entry) => StorageEntry::::decode(&mut &entry[..]) - .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) - .map(Some), - None => Ok(None), - } - } -} - -/// Database-backed list cache storage transaction. -pub struct DbStorageTransaction<'a> { - storage: &'a DbStorage, - tx: &'a mut Transaction, -} - -impl<'a> DbStorageTransaction<'a> { - /// Create new database transaction. - pub fn new(storage: &'a DbStorage, tx: &'a mut Transaction) -> Self { - DbStorageTransaction { storage, tx } - } -} - -impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.set_from_vec( - self.storage.columns.cache, - &self.storage.encode_block_id(at), - entry.encode(), - ); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.tx.remove(self.storage.columns.cache, &self.storage.encode_block_id(at)); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.tx.set_from_vec( - self.storage.columns.meta, - &self.storage.meta_key, - meta::encode(best_finalized_entry, unfinalized, operation), - ); - } -} - -/// Metadata related functions. -mod meta { - use super::*; - - /// Convert cache name into cache metadata key. - pub fn key(name: &[u8]) -> Vec { - let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); - key_name.extend_from_slice(name); - key_name - } - - /// Encode cache metadata 'applying' commit operation before encoding. 
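// "Applying" means the pending commit operation is folded into the
// (finalized, unfinalized) view before serialization, so the metadata written
// to disk never lags the operation being committed. A toy version of the same
// idea, using plain block numbers instead of ComplexBlockId (illustrative
// names, not the real CommitOperation):
//
//     enum ToyOp {
//         AddFork(u64),  // a new unfinalized fork head appeared
//         Finalize(u64), // a block was finalized
//     }
//
//     fn apply_then_encode(
//         mut finalized: Option<u64>,
//         mut unfinalized: Vec<u64>,
//         op: &ToyOp,
//     ) -> Vec<u8> {
//         match op {
//             ToyOp::AddFork(at) => unfinalized.push(*at),
//             ToyOp::Finalize(at) => {
//                 finalized = Some(*at);
//                 unfinalized.clear(); // simplification: assume every fork is retired
//             },
//         }
//         // the real code SCALE-encodes the tuple instead of formatting it
//         format!("{:?}/{:?}", finalized, unfinalized).into_bytes()
//     }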
- pub fn encode( - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - op: &CommitOperation, - ) -> Vec { - let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); - let mut unfinalized = - unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); - - match op { - CommitOperation::AppendNewBlock(_, _) => (), - CommitOperation::AppendNewEntry(index, ref entry) => { - unfinalized[*index] = &entry.valid_from; - }, - CommitOperation::AddNewFork(ref entry) => { - unfinalized.push(&entry.valid_from); - }, - CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { - if let Some(finalizing_entry) = finalizing_entry.as_ref() { - finalized = Some(&finalizing_entry.valid_from); - } - for fork_index in forks.iter().rev() { - unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(ref forks) => { - for (fork_index, updated_fork) in forks.iter().rev() { - match updated_fork { - Some(updated_fork) => - unfinalized[*fork_index] = &updated_fork.head().valid_from, - None => { - unfinalized.remove(*fork_index); - }, - } - } - }, - } - - (finalized, unfinalized).encode() - } - - /// Decode meta information. - pub fn decode(encoded: &[u8]) -> ClientResult> { - let input = &mut &*encoded; - let finalized: Option> = Decode::decode(input).map_err(|_| { - ClientError::from(ClientError::Backend("Error decoding cache meta".into())) - })?; - let unfinalized: Vec> = Decode::decode(input).map_err(|_| { - ClientError::from(ClientError::Backend("Error decoding cache meta".into())) - })?; - - Ok(Metadata { finalized, unfinalized }) - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use std::collections::{HashMap, HashSet}; - - pub struct FaultyStorage; - - impl Storage for FaultyStorage { - fn read_id(&self, _at: NumberFor) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_header(&self, _at: &Block::Hash) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_meta(&self) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_entry( - &self, - _at: &ComplexBlockId, - ) -> ClientResult>> { - Err(ClientError::Backend("TestError".into())) - } - } - - pub struct DummyStorage { - meta: Metadata, - ids: HashMap, Block::Hash>, - headers: HashMap, - entries: HashMap>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - meta: Metadata { finalized: None, unfinalized: Vec::new() }, - ids: HashMap::new(), - headers: HashMap::new(), - entries: HashMap::new(), - } - } - - pub fn with_meta( - mut self, - finalized: Option>, - unfinalized: Vec>, - ) -> Self { - self.meta.finalized = finalized; - self.meta.unfinalized = unfinalized; - self - } - - pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { - self.ids.insert(at, id); - self - } - - pub fn with_header(mut self, header: Block::Header) -> Self { - self.headers.insert(header.hash(), header); - self - } - - pub fn with_entry( - mut self, - at: ComplexBlockId, - entry: StorageEntry, - ) -> Self { - self.entries.insert(at.hash, entry); - self - } - } - - impl Storage for DummyStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - Ok(self.ids.get(&at).cloned()) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - Ok(self.headers.get(&at).cloned()) - } - - fn read_meta(&self) -> ClientResult> { - Ok(self.meta.clone()) - } - - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>> { - Ok(self.entries.get(&at.hash).cloned()) - } - } - - pub 
struct DummyTransaction { - updated_meta: Option>, - inserted_entries: HashSet, - removed_entries: HashSet, - } - - impl DummyTransaction { - pub fn new() -> Self { - DummyTransaction { - updated_meta: None, - inserted_entries: HashSet::new(), - removed_entries: HashSet::new(), - } - } - - pub fn inserted_entries(&self) -> &HashSet { - &self.inserted_entries - } - - pub fn removed_entries(&self) -> &HashSet { - &self.removed_entries - } - - pub fn updated_meta(&self) -> &Option> { - &self.updated_meta - } - } - - impl StorageTransaction for DummyTransaction { - fn insert_storage_entry( - &mut self, - at: &ComplexBlockId, - _entry: &StorageEntry, - ) { - self.inserted_entries.insert(at.hash); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.removed_entries.insert(at.hash); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.updated_meta = Some( - meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(), - ); - } - } -} diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs deleted file mode 100644 index 5502896aced2c..0000000000000 --- a/client/db/src/cache/mod.rs +++ /dev/null @@ -1,413 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! DB-backed cache of blockchain data. - -use parking_lot::RwLock; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; - -use crate::{ - utils::{self, COLUMN_META}, - DbHash, -}; -use codec::{Decode, Encode}; -use sc_client_api::blockchain::{ - well_known_cache_keys::{self, Id as CacheKeyId}, - Cache as BlockchainCache, -}; -use sp_blockchain::{HeaderMetadataCache, Result as ClientResult}; -use sp_database::{Database, Transaction}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, -}; - -use self::list_cache::{ListCache, PruningStrategy}; - -mod list_cache; -mod list_entry; -mod list_storage; - -/// Minimal post-finalization age of finalized blocks before they'll pruned. -const PRUNE_DEPTH: u32 = 1024; - -/// The type of entry that is inserted to the cache. -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum EntryType { - /// Non-final entry. - NonFinal, - /// Final entry. - Final, - /// Genesis entry (inserted during cache initialization). - Genesis, -} - -/// Block identifier that holds both hash and number. -#[derive(Clone, Debug, Encode, Decode, PartialEq)] -pub struct ComplexBlockId { - /// Hash of the block. - pub(crate) hash: Block::Hash, - /// Number of the block. - pub(crate) number: NumberFor, -} - -impl ComplexBlockId { - /// Create new complex block id. 
- pub fn new(hash: Block::Hash, number: NumberFor) -> Self { - ComplexBlockId { hash, number } - } -} - -impl ::std::cmp::PartialOrd for ComplexBlockId { - fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { - self.number.partial_cmp(&other.number) - } -} - -/// All cache items must implement this trait. -pub trait CacheItemT: Clone + Decode + Encode + PartialEq {} - -impl CacheItemT for T where T: Clone + Decode + Encode + PartialEq {} - -/// Database-backed blockchain data cache. -pub struct DbCache { - cache_at: HashMap, self::list_storage::DbStorage>>, - header_metadata_cache: Arc>, - db: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, -} - -impl DbCache { - /// Create new cache. - pub fn new( - db: Arc>, - header_metadata_cache: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, - ) -> Self { - Self { - cache_at: HashMap::new(), - db, - header_metadata_cache, - key_lookup_column, - header_column, - cache_column, - genesis_hash, - best_finalized_block, - } - } - - /// Set genesis block hash. - pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) { - self.genesis_hash = genesis_hash; - } - - /// Begin cache transaction. - pub fn transaction<'a>( - &'a mut self, - tx: &'a mut Transaction, - ) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: HashMap::new(), - best_finalized_block: None, - } - } - - /// Begin cache transaction with given ops. - pub fn transaction_with_ops<'a>( - &'a mut self, - tx: &'a mut Transaction, - ops: DbCacheTransactionOps, - ) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: ops.cache_at_ops, - best_finalized_block: ops.best_finalized_block, - } - } - - /// Run post-commit cache operations. - pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { - for (name, ops) in ops.cache_at_ops.into_iter() { - self.get_cache(name)?.on_transaction_commit(ops); - } - if let Some(best_finalized_block) = ops.best_finalized_block { - self.best_finalized_block = best_finalized_block; - } - Ok(()) - } - - /// Creates `ListCache` with the given name or returns a reference to the existing. - pub(crate) fn get_cache( - &mut self, - name: CacheKeyId, - ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { - get_cache_helper( - &mut self.cache_at, - name, - &self.db, - self.key_lookup_column, - self.header_column, - self.cache_column, - &self.best_finalized_block, - ) - } -} - -// This helper is needed because otherwise the borrow checker will require to -// clone all parameters outside of the closure. 
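// A minimal sketch of the occupied/vacant pattern the helper below relies on:
// build the value lazily on first access and hand back a mutable reference
// either way (toy value type; the real helper constructs a ListCache):
//
//     use std::collections::{hash_map::Entry, HashMap};
//
//     fn get_or_create<'a>(
//         map: &'a mut HashMap<String, Vec<u8>>,
//         name: &str,
//     ) -> &'a mut Vec<u8> {
//         match map.entry(name.to_string()) {
//             Entry::Occupied(entry) => entry.into_mut(),
//             Entry::Vacant(entry) => entry.insert(Vec::new()),
//         }
//     }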
-fn get_cache_helper<'a, Block: BlockT>( - cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, - name: CacheKeyId, - db: &Arc>, - key_lookup: u32, - header: u32, - cache: u32, - best_finalized_block: &ComplexBlockId, -) -> ClientResult<&'a mut ListCache, self::list_storage::DbStorage>> { - match cache_at.entry(name) { - Entry::Occupied(entry) => Ok(entry.into_mut()), - Entry::Vacant(entry) => { - let cache = ListCache::new( - self::list_storage::DbStorage::new( - name.to_vec(), - db.clone(), - self::list_storage::DbColumns { meta: COLUMN_META, key_lookup, header, cache }, - ), - cache_pruning_strategy(name), - best_finalized_block.clone(), - )?; - Ok(entry.insert(cache)) - }, - } -} - -/// Cache operations that are to be committed after database transaction is committed. -#[derive(Default)] -pub struct DbCacheTransactionOps { - cache_at_ops: HashMap>>, - best_finalized_block: Option>, -} - -impl DbCacheTransactionOps { - /// Empty transaction ops. - pub fn empty() -> DbCacheTransactionOps { - DbCacheTransactionOps { cache_at_ops: HashMap::new(), best_finalized_block: None } - } -} - -/// Database-backed blockchain data cache transaction valid for single block import. -pub struct DbCacheTransaction<'a, Block: BlockT> { - cache: &'a mut DbCache, - tx: &'a mut Transaction, - cache_at_ops: HashMap>>, - best_finalized_block: Option>, -} - -impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { - /// Convert transaction into post-commit operations set. - pub fn into_ops(self) -> DbCacheTransactionOps { - DbCacheTransactionOps { - cache_at_ops: self.cache_at_ops, - best_finalized_block: self.best_finalized_block, - } - } - - /// When new block is inserted into database. - pub fn on_block_insert( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId, - data_at: HashMap>, - entry_type: EntryType, - ) -> ClientResult { - // prepare list of caches that are not update - // (we might still need to do some cache maintenance in this case) - let missed_caches = self - .cache - .cache_at - .keys() - .filter(|cache| !data_at.contains_key(*cache)) - .cloned() - .collect::>(); - - let mut insert_op = |name: CacheKeyId, - value: Option>| - -> Result<(), sp_blockchain::Error> { - let cache = self.cache.get_cache(name)?; - let cache_ops = self.cache_at_ops.entry(name).or_default(); - cache.on_block_insert( - &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), - parent.clone(), - block.clone(), - value, - entry_type, - cache_ops, - )?; - - Ok(()) - }; - - data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?; - missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?; - - match entry_type { - EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block), - EntryType::NonFinal => (), - } - - Ok(self) - } - - /// When previously inserted block is finalized. - pub fn on_block_finalize( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId, - ) -> ClientResult { - for (name, cache) in self.cache.cache_at.iter() { - let cache_ops = self.cache_at_ops.entry(*name).or_default(); - cache.on_block_finalize( - &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), - parent.clone(), - block.clone(), - cache_ops, - )?; - } - - self.best_finalized_block = Some(block); - - Ok(self) - } - - /// When block is reverted. 
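
// Editor's note: an illustrative sketch (not part of the patch) of how callers feed
// on_block_insert above. Caches named in `data_at` receive Some(value); every other
// loaded cache is still visited with a None value, so list maintenance (fork tracking,
// pruning) runs for it as well. The key and payload below are assumptions:
//
//     let mut data_at = HashMap::new();
//     data_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, config.encode());
//     let tx = tx.on_block_insert(parent, block, data_at, EntryType::NonFinal)?;
//     let ops = tx.into_ops();
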
- pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId) -> ClientResult { - for (name, cache) in self.cache.cache_at.iter() { - let cache_ops = self.cache_at_ops.entry(*name).or_default(); - cache.on_block_revert( - &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), - reverted_block, - cache_ops, - )?; - } - - Ok(self) - } -} - -/// Synchronous implementation of database-backed blockchain data cache. -pub struct DbCacheSync(pub RwLock>); - -impl BlockchainCache for DbCacheSync { - fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { - let mut cache = self.0.write(); - let genesis_hash = cache.genesis_hash; - let cache_contents = vec![(*key, data)].into_iter().collect(); - let db = cache.db.clone(); - let mut dbtx = Transaction::new(); - let tx = cache.transaction(&mut dbtx); - let tx = tx.on_block_insert( - ComplexBlockId::new(Default::default(), Zero::zero()), - ComplexBlockId::new(genesis_hash, Zero::zero()), - cache_contents, - EntryType::Genesis, - )?; - let tx_ops = tx.into_ops(); - db.commit(dbtx)?; - cache.commit(tx_ops)?; - - Ok(()) - } - - fn get_at( - &self, - key: &CacheKeyId, - at: &BlockId, - ) -> ClientResult< - Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, - > { - let mut cache = self.0.write(); - let header_metadata_cache = cache.header_metadata_cache.clone(); - let cache = cache.get_cache(*key)?; - let storage = cache.storage(); - let db = storage.db(); - let columns = storage.columns(); - let at = match *at { - BlockId::Hash(hash) => match header_metadata_cache.header_metadata(hash) { - Some(metadata) => ComplexBlockId::new(hash, metadata.number), - None => { - let header = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone()), - )?; - ComplexBlockId::new(hash, *header.number()) - }, - }, - BlockId::Number(number) => { - let hash = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Number(number.clone()), - )? - .hash(); - ComplexBlockId::new(hash, number) - }, - }; - - cache.value_at_block(&at).map(|block_and_value| { - block_and_value.map(|(begin_block, end_block, value)| { - ( - (begin_block.number, begin_block.hash), - end_block.map(|end_block| (end_block.number, end_block.hash)), - value, - ) - }) - }) - } -} - -/// Get pruning strategy for given cache. -fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy { - // the cache is mostly used to store data from consensus engines - // this kind of data is only required for non-finalized blocks - // => by default we prune finalized cached entries - - match cache { - // we need to keep changes tries configurations forever (or at least until changes tries, - // that were built using this configuration, are pruned) to make it possible to refer - // to old changes tries - well_known_cache_keys::CHANGES_TRIE_CONFIG => PruningStrategy::NeverPrune, - _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), - } -} diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs deleted file mode 100644 index 3a3c5918535f9..0000000000000 --- a/client/db/src/changes_tries_storage.rs +++ /dev/null @@ -1,1168 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! DB-backed changes tries storage. - -use crate::{ - cache::{ - ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType, - }, - utils::{self, meta_keys, Meta}, - Database, DbHash, -}; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use parking_lot::RwLock; -use sc_client_api::backend::PrunableStateChangesTrieStorage; -use sp_blockchain::{ - well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderMetadataCache, - Result as ClientResult, -}; -use sp_core::{ - convert_hash, storage::PrefixedStorageKey, ChangesTrieConfiguration, - ChangesTrieConfigurationRange, -}; -use sp_database::Transaction; -use sp_runtime::{ - generic::{BlockId, ChangesTrieSignal, DigestItem}, - traits::{Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero}, -}; -use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction}; -use sp_trie::MemoryDB; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -/// Extract new changes trie configuration (if available) from the header. -pub fn extract_new_configuration( - header: &Header, -) -> Option<&Option> { - header - .digest() - .log(DigestItem::as_changes_trie_signal) - .and_then(ChangesTrieSignal::as_new_configuration) -} - -/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is -/// currently guaranteed because import lock is held during block import/finalization. -pub struct DbChangesTrieStorageTransaction { - /// Cache operations that must be performed after db transaction is committed. - cache_ops: DbCacheTransactionOps, - /// New configuration (if changed at current block). - new_config: Option>, -} - -impl DbChangesTrieStorageTransaction { - /// Consume self and return transaction with given new configuration. - pub fn with_new_config(mut self, new_config: Option>) -> Self { - self.new_config = new_config; - self - } -} - -impl From> for DbChangesTrieStorageTransaction { - fn from(cache_ops: DbCacheTransactionOps) -> Self { - DbChangesTrieStorageTransaction { cache_ops, new_config: None } - } -} - -/// Changes tries storage. -/// -/// Stores all tries in separate DB column. -/// Lock order: meta, tries_meta, cache, build_cache. -pub struct DbChangesTrieStorage { - db: Arc>, - meta_column: u32, - changes_tries_column: u32, - key_lookup_column: u32, - header_column: u32, - meta: Arc, Block::Hash>>>, - tries_meta: RwLock>, - min_blocks_to_keep: Option, - /// The cache stores all ever existing changes tries configurations. - cache: DbCacheSync, - /// Build cache is a map of block => set of storage keys changed at this block. - /// They're used to build digest blocks - instead of reading+parsing tries from db - /// we just use keys sets from the cache. 
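
// Editor's note (editorial aside, not part of the patch): extract_new_configuration
// near the top of this file is the detection point for configuration changes; a hedged
// usage sketch, where `header` is an assumed binding:
//
//     // None => no signal in this header's digest;
//     // Some(None) => changes tries are disabled from this block on;
//     // Some(Some(cfg)) => a new configuration takes effect.
//     let signal: Option<Option<ChangesTrieConfiguration>> =
//         extract_new_configuration(&header).cloned();
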
- build_cache: RwLock>>, -} - -/// Persistent struct that contains all the changes tries metadata. -#[derive(Decode, Encode, Debug)] -struct ChangesTriesMeta { - /// Oldest unpruned max-level (or skewed) digest trie blocks range. - /// The range is inclusive from both sides. - /// Is None only if: - /// 1) we haven't yet finalized any blocks (except genesis) - /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are - /// disabled 3) changes tries pruning is disabled - pub oldest_digest_range: Option<(NumberFor, NumberFor)>, - /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. - /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if - /// created). - pub oldest_pruned_digest_range_end: NumberFor, -} - -impl DbChangesTrieStorage { - /// Create new changes trie storage. - pub fn new( - db: Arc>, - header_metadata_cache: Arc>, - meta_column: u32, - changes_tries_column: u32, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - ) -> ClientResult { - let (finalized_hash, finalized_number, genesis_hash) = { - let meta = meta.read(); - (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) - }; - let tries_meta = read_tries_meta(&*db, meta_column)?; - Ok(Self { - db: db.clone(), - meta_column, - changes_tries_column, - key_lookup_column, - header_column, - meta, - min_blocks_to_keep, - cache: DbCacheSync(RwLock::new(DbCache::new( - db.clone(), - header_metadata_cache, - key_lookup_column, - header_column, - cache_column, - genesis_hash, - ComplexBlockId::new(finalized_hash, finalized_number), - ))), - build_cache: RwLock::new(ChangesTrieBuildCache::new()), - tries_meta: RwLock::new(tries_meta), - }) - } - - /// Commit new changes trie. - pub fn commit( - &self, - tx: &mut Transaction, - mut changes_trie: MemoryDB>, - parent_block: ComplexBlockId, - block: ComplexBlockId, - new_header: &Block::Header, - finalized: bool, - new_configuration: Option>, - cache_tx: Option>, - ) -> ClientResult> { - // insert changes trie, associated with block, into DB - for (key, (val, _)) in changes_trie.drain() { - tx.set(self.changes_tries_column, key.as_ref(), &val); - } - - // if configuration has not been changed AND block is not finalized => nothing to do here - let new_configuration = match new_configuration { - Some(new_configuration) => new_configuration, - None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), - None => - return self.finalize( - tx, - parent_block.hash, - block.hash, - block.number, - Some(new_header), - cache_tx, - ), - }; - - // update configuration cache - let mut cache_at = HashMap::new(); - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - Ok(DbChangesTrieStorageTransaction::from(match cache_tx { - Some(cache_tx) => self - .cache - .0 - .write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(), - None => self - .cache - .0 - .write() - .transaction(tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? 
- .into_ops(), - }) - .with_new_config(Some(new_configuration))) - } - - /// Called when block is finalized. - pub fn finalize( - &self, - tx: &mut Transaction, - parent_block_hash: Block::Hash, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option>, - ) -> ClientResult> { - // prune obsolete changes tries - self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; - - // if we have inserted the block that we're finalizing in the same transaction - // => then we have already finalized it from the commit() call - if cache_tx.is_some() { - if let Some(new_header) = new_header { - if new_header.hash() == block_hash { - return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")) - } - } - } - - // and finalize configuration cache entries - let block = ComplexBlockId::new(block_hash, block_num); - let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); - let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); - Ok(match cache_tx { - Some(cache_tx) => DbChangesTrieStorageTransaction::from( - self.cache - .0 - .write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_finalize(parent_block, block)? - .into_ops(), - ) - .with_new_config(cache_tx.new_config), - None => DbChangesTrieStorageTransaction::from( - self.cache - .0 - .write() - .transaction(tx) - .on_block_finalize(parent_block, block)? - .into_ops(), - ), - }) - } - - /// When block is reverted. - pub fn revert( - &self, - tx: &mut Transaction, - block: &ComplexBlockId, - ) -> ClientResult> { - Ok(self.cache.0.write().transaction(tx).on_block_revert(block)?.into_ops().into()) - } - - /// When transaction has been committed. - pub fn post_commit(&self, tx: Option>) { - if let Some(tx) = tx { - self.cache.0.write().commit(tx.cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there is tx; qed", - ); - } - } - - /// Commit changes into changes trie build cache. - pub fn commit_build_cache( - &self, - cache_update: ChangesTrieCacheAction>, - ) { - self.build_cache.write().perform(cache_update); - } - - /// Prune obsolete changes tries. 
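
// Editor's note: ahead of prune() below, a condensed, editorial sketch of its gating
// logic under the assumptions visible in this file (the helper is not the patch's own
// code): archive nodes carry min_blocks_to_keep == None and never prune; otherwise a
// digest range ending at `range_end` is pruned only once the finalized block is more
// than `keep` blocks past it:
//
//     fn should_prune_range<N: Ord + Copy + std::ops::Sub<Output = N>>(
//         min_blocks_to_keep: Option<N>,
//         finalized: N,
//         range_end: N,
//     ) -> bool {
//         match min_blocks_to_keep {
//             None => false, // archive node: keep all changes tries
//             Some(keep) => finalized > range_end && finalized - range_end > keep,
//         }
//     }
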
- fn prune( - &self, - tx: &mut Transaction, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option<&DbChangesTrieStorageTransaction>, - ) -> ClientResult<()> { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return Ok(()), - }; - - let mut tries_meta = self.tries_meta.write(); - let mut next_digest_range_start = block_num; - loop { - // prune oldest digest if it is known - // it could be unknown if: - // 1) either we're finalizing block#1 - // 2) or we are (or were) in period where changes tries are disabled - if let Some((begin, end)) = tries_meta.oldest_digest_range { - if block_num <= end || block_num - end <= min_blocks_to_keep.into() { - break - } - - tries_meta.oldest_pruned_digest_range_end = end; - sp_state_machine::prune_changes_tries( - &*self, - begin, - end, - &sp_state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.remove(self.changes_tries_column, node.as_ref()), - ); - - next_digest_range_start = end + One::one(); - } - - // proceed to the next configuration range - let next_digest_range_start_hash = match block_num == next_digest_range_start { - true => block_hash, - false => utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(next_digest_range_start), - )? - .hash(), - }; - - let config_for_new_block = new_header - .map(|header| *header.number() == next_digest_range_start) - .unwrap_or(false); - let next_config = match cache_tx { - Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { - let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed"); - Ok(ChangesTrieConfigurationRange { - zero: (block_num, block_hash), - end: None, - config, - }) - }, - _ if config_for_new_block => self.configuration_at(&BlockId::Hash( - *new_header - .expect("config_for_new_block is only true when new_header is passed; qed") - .parent_hash(), - )), - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash)), - }; - let next_config = match next_config { - Ok(next_config) => next_config, - Err(ClientError::UnknownBlock(_)) => break, // No block means nothing to prune. - Err(e) => return Err(e), - }; - if let Some(config) = next_config.config { - let mut oldest_digest_range = config - .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) - .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); - - if let Some(end) = next_config.end { - if end.0 < oldest_digest_range.1 { - oldest_digest_range.1 = end.0; - } - } - - tries_meta.oldest_digest_range = Some(oldest_digest_range); - continue - } - - tries_meta.oldest_digest_range = None; - break - } - - write_tries_meta(tx, self.meta_column, &*tries_meta); - Ok(()) - } -} - -impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn storage( - &self, - ) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { - self - } - - fn configuration_at( - &self, - at: &BlockId, - ) -> ClientResult, Block::Hash>> { - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
- .and_then(|(zero, end, encoded)| { - Decode::decode(&mut &encoded[..]) - .ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config }) - }) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) - } - - fn oldest_pruned_digest_range_end(&self) -> NumberFor { - self.tries_meta.read().oldest_pruned_digest_range_end - } -} - -impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> - for DbChangesTrieStorage -{ - fn build_anchor( - &self, - hash: Block::Hash, - ) -> Result>, String> { - utils::read_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Hash(hash), - ) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| { - maybe_header - .map(|header| sp_state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - }) - .ok_or_else(|| format!("Unknown header: {}", hash)) - }) - } - - fn root( - &self, - anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!( - "Can't get changes trie root at {} using anchor at {}", - block, anchor.number - )) - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(current_num), - ) - .map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Hash(current_hash), - ) - .map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok(utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - block_id, - ) - .map_err(|e| e.to_string())? - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned()) - } -} - -impl sp_state_machine::ChangesTrieStorage, NumberFor> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn as_roots_storage( - &self, - ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap, HashSet>>), - ) -> bool { - self.build_cache.read().with_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { - Ok(self.db.get(self.changes_tries_column, key.as_ref())) - } -} - -/// Read changes tries metadata from database. 
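
// Editor's note: a simplified, editorial sketch of the parent-hash walk that root()
// above performs when the requested block is neither finalized nor on the anchor's
// canonical chain: step back from the anchor header until the target height is
// reached (assumes target <= number). `read_header` is an assumed closure standing in
// for utils::require_header.
fn walk_back_to<Block: BlockT>(
	read_header: impl Fn(Block::Hash) -> Result<Block::Header, String>,
	mut hash: Block::Hash,
	mut number: NumberFor<Block>,
	target: NumberFor<Block>,
) -> Result<Block::Hash, String> {
	while number != target {
		hash = *read_header(hash)?.parent_hash();
		number = number - One::one();
	}
	Ok(hash)
}
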
-fn read_tries_meta( - db: &dyn Database, - meta_column: u32, -) -> ClientResult> { - match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { - Some(h) => Decode::decode(&mut &h[..]).map_err(|err| { - ClientError::Backend(format!("Error decoding changes tries metadata: {}", err)) - }), - None => Ok(ChangesTriesMeta { - oldest_digest_range: None, - oldest_pruned_digest_range_end: Zero::zero(), - }), - } -} - -/// Write changes tries metadata from database. -fn write_tries_meta( - tx: &mut Transaction, - meta_column: u32, - meta: &ChangesTriesMeta, -) { - tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode()); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - tests::{insert_header, prepare_changes, Block}, - Backend, - }; - use hash_db::EMPTY_PREFIX; - use sc_client_api::backend::{ - Backend as ClientBackend, BlockImportOperation, NewBlockState, - PrunableStateChangesTrieStorage, - }; - use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; - use sp_core::H256; - use sp_runtime::{ - testing::{Digest, Header}, - traits::{BlakeTwo256, Hash}, - }; - use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; - - fn changes(number: u64) -> Option, Vec)>> { - Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) - } - - fn insert_header_with_configuration_change( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Option, Vec)>>, - new_configuration: Option, - ) -> H256 { - let mut digest = Digest::default(); - let mut changes_trie_update = Default::default(); - if let Some(changes) = changes { - let (root, update) = prepare_changes(changes); - digest.push(DigestItem::ChangesTrieRoot(root)); - changes_trie_update = update; - } - digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - new_configuration, - ))); - - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new()), - digest, - extrinsics_root: Default::default(), - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) - .unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - } - - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - let check_changes = |backend: &Backend, - block: u64, - changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block, - }; - assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - let storage = backend.changes_tries_storage.storage(); - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - 
let block0 = insert_header( - &backend, - 0, - Default::default(), - Some(changes0.clone()), - Default::default(), - ); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header( - &backend, - 0, - Default::default(), - Some(changes0.clone()), - Default::default(), - ); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = - insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); - let block2_1_1 = - insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = - insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); - let block2_2_1 = - insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = 
sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let parent_hash = |number| { - if number == 0 { - Default::default() - } else { - backend - .blockchain() - .header(BlockId::Number(number - 1)) - .unwrap() - .unwrap() - .hash() - } - }; - - let insert_regular_header = |with_changes, number| { - insert_header( - &backend, - number, - parent_hash(number), - if with_changes { changes(number) } else { None }, - Default::default(), - ); - }; - - let is_pruned = |number| { - let trie_root = backend - .blockchain() - .header(BlockId::Number(number)) - .unwrap() - .unwrap() - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned(); - match trie_root { - Some(trie_root) => - backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), - None => true, - } - }; - - let finalize_block = |number| { - let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); - let mut tx = Transaction::new(); - let cache_ops = backend - .changes_tries_storage - .finalize(&mut tx, *header.parent_hash(), header.hash(), number, None, None) - .unwrap(); - backend.storage.db.commit(tx).unwrap(); - backend.changes_tries_storage.post_commit(Some(cache_ops)); - }; - - // configuration ranges: - // (0; 6] - None - // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 - // [18; 21] - None - // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 - // [33; ... - Some(1) - let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); - let config_at_17 = None; - let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); - let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); - - (0..6).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); - (7..17).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change( - &backend, - 17, - parent_hash(17), - changes(17), - config_at_17, - ); - (18..21).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); - (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change( - &backend, - 32, - parent_hash(32), - changes(32), - config_at_32, - ); - (33..50).for_each(|number| insert_regular_header(true, number)); - - // when only genesis is finalized, nothing is pruned - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [1; 18] are finalized, nothing is pruned - (1..=18).for_each(|number| finalize_block(number)); - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 19 is finalized, changes tries for blocks [7; 10] are pruned - finalize_block(19); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| 
assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [20; 22] are finalized, nothing is pruned - (20..=22).for_each(|number| finalize_block(number)); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 23 is finalized, changes tries for blocks [11; 14] are pruned - finalize_block(23); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [24; 25] are finalized, nothing is pruned - (24..=25).for_each(|number| finalize_block(number)); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 26 is finalized, changes tries for blocks [15; 17] are pruned - finalize_block(26); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [27; 37] are finalized, nothing is pruned - (27..=37).for_each(|number| finalize_block(number)); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 38 is finalized, changes tries for blocks [22; 29] are pruned - finalize_block(38); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [39; 40] are finalized, nothing is pruned - (39..=40).for_each(|number| finalize_block(number)); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 41 is finalized, changes tries for blocks [30; 32] are pruned - finalize_block(41); - (0..=32).for_each(|number| assert!(is_pruned(number))); - (33..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 42 is finalized, changes trie for block 33 is pruned - finalize_block(42); - (0..=33).for_each(|number| assert!(is_pruned(number))); - (34..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 43 is finalized, changes trie for block 34 is pruned - finalize_block(43); - (0..=34).for_each(|number| assert!(is_pruned(number))); - (35..50).for_each(|number| assert!(!is_pruned(number))); - } - - #[test] - fn changes_tries_configuration_is_updated_on_block_insert() { - let backend = Backend::::new_test(1000, 100); - - // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 }); - let config_at_3 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); - let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = - insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = - 
insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = - insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); - let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let block7 = - insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); - - // test configuration cache - let storage = &backend.changes_tries_storage; - assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, - config_at_7.clone(), - ); - } - - #[test] - fn test_finalize_several_configuration_change_blocks_in_single_operation() { - let mut backend = Backend::::new_test(10, 10); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let configs = - (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); - - // insert unfinalized headers - let block0 = insert_header_with_configuration_change( - &backend, - 0, - Default::default(), - None, - configs[0].clone(), - ); - let block1 = insert_header_with_configuration_change( - &backend, - 1, - block0, - changes(1), - configs[1].clone(), - ); - let block2 = insert_header_with_configuration_change( - &backend, - 2, - block1, - changes(2), - configs[2].clone(), - ); - - let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); - let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change( - &backend, - 2, - block1, - changes(8), - side_config2_1.clone(), - ); - let _ = insert_header_with_configuration_change( - &backend, - 3, - block2_1, - changes(9), - side_config2_2.clone(), - ); - - // insert finalized header => 4 headers are finalized at once - let header3 = Header { - number: 3, - parent_hash: block2, - state_root: Default::default(), - digest: Digest { - logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - configs[3].clone(), - ))], - }, - extrinsics_root: Default::default(), - }; - let block3 = header3.hash(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - op.set_block_data(header3, None, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - - // insert more unfinalized headers - let block4 = insert_header_with_configuration_change( - &backend, - 4, - block3, - changes(4), - configs[4].clone(), - ); - let block5 = insert_header_with_configuration_change( - &backend, - 5, - block4, - changes(5), - configs[5].clone(), - ); - let block6 = insert_header_with_configuration_change( - &backend, - 6, - block5, - changes(6), - configs[6].clone(), - ); - - // 
insert finalized header => 4 headers are finalized at once - let header7 = Header { - number: 7, - parent_hash: block6, - state_root: Default::default(), - digest: Digest { - logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - configs[7].clone(), - ))], - }, - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); - op.mark_finalized(BlockId::Hash(block5), None).unwrap(); - op.mark_finalized(BlockId::Hash(block6), None).unwrap(); - op.set_block_data(header7, None, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - } - - #[test] - fn changes_tries_configuration_is_reverted() { - let backend = Backend::::new_test(10, 10); - - let config0 = Some(ChangesTrieConfiguration::new(2, 5)); - let block0 = - insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); - let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = - insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - let just1 = Some((*b"TEST", vec![42])); - backend.finalize_block(BlockId::Number(1), just1).unwrap(); - let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = - insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); - let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); - let _ = - insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); - let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = - insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); - let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = - insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); - - // before truncate there are 2 unfinalized forks - block2_1+block2_3 - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 4], - ); - - // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 - backend.revert(1, false).unwrap(); - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 3], - ); - - // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl - // specifics), the 1st one points to the block #3 because it isn't truncated - backend.revert(1, false).unwrap(); - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 2], - ); - - // after truncating block2 - there are no unfinalized forks - backend.revert(1, false).unwrap(); - assert!(backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>() - .is_empty(),); - } -} diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3b8936c0f7bac..7d46b63da5bb4 100644 --- 
a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -28,14 +28,11 @@ #![warn(missing_docs)] -pub mod light; pub mod offchain; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub mod bench; -mod cache; -mod changes_tries_storage; mod children; #[cfg(feature = "with-parity-db")] mod parity_db; @@ -56,7 +53,6 @@ use std::{ }; use crate::{ - changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}, stats::StateUsageStats, storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, @@ -64,8 +60,7 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ - backend::{NewBlockState, ProvideChtRoots, PrunableStateChangesTrieStorage}, - cht, + backend::NewBlockState, leaves::{FinalizationDisplaced, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, @@ -79,11 +74,10 @@ use sp_blockchain::{ use sp_core::{ offchain::OffchainOverlayedChange, storage::{well_known_keys, ChildInfo}, - ChangesTrieConfiguration, }; use sp_database::Transaction; use sp_runtime::{ - generic::{BlockId, DigestItem}, + generic::BlockId, traits::{ Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, @@ -91,9 +85,8 @@ use sp_runtime::{ Justification, Justifications, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, - ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, - StorageCollection, UsageInfo as StateUsageInfo, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, + OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, }; use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; @@ -104,7 +97,6 @@ pub use sp_database::Database; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub use bench::BenchmarkingState; -const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; const CACHE_HEADERS: usize = 8; /// Default value for storage cache child ratio. @@ -406,11 +398,9 @@ pub(crate) mod columns { pub const HEADER: u32 = 4; pub const BODY: u32 = 5; pub const JUSTIFICATIONS: u32 = 6; - pub const CHANGES_TRIE: u32 = 7; pub const AUX: u32 = 8; /// Offchain workers local storage pub const OFFCHAIN: u32 = 9; - pub const CACHE: u32 = 10; /// Transactions pub const TRANSACTION: u32 = 11; } @@ -506,13 +496,6 @@ impl BlockchainDb { let mut meta = self.meta.write(); meta.block_gap = gap; } - - // Get block changes trie root, if available. 
- fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block).map(|header| { - header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) - }) - } } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { @@ -646,10 +629,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb Option>> { - None - } - fn leaves(&self) -> ClientResult> { Ok(self.leaves.read().hashes()) } @@ -702,12 +681,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb sc_client_api::blockchain::ProvideCache for BlockchainDb { - fn cache(&self) -> Option>> { - None - } -} - impl HeaderMetadata for BlockchainDb { type Error = sp_blockchain::Error; @@ -745,62 +718,6 @@ impl HeaderMetadata for BlockchainDb { } } -impl ProvideChtRoots for BlockchainDb { - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - let cht_number = match cht::block_to_cht_number(cht_size, block) { - Some(number) => number, - None => return Ok(None), - }; - - let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); - - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - cht::compute_root::, _>( - cht::size(), - cht_number, - cht_range.map(|num| self.hash(num)), - ) - .map(Some) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - let cht_number = match cht::block_to_cht_number(cht_size, block) { - Some(number) => number, - None => return Ok(None), - }; - - let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); - - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - cht::compute_root::, _>( - cht::size(), - cht_number, - cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - ) - .map(Some) - } -} - /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, @@ -808,9 +725,6 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, - changes_trie_updates: MemoryDB>, - changes_trie_build_cache_update: Option>>, - changes_trie_config_update: Option>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, @@ -858,25 +772,12 @@ impl BlockImportOperation { ) }); - let mut changes_trie_config = None; let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| { - if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some(Decode::decode(&mut &v[..])); - } - (&k[..], Some(&v[..])) - }), + storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))), child_delta, ); - let changes_trie_config = match changes_trie_config { - Some(Ok(c)) => Some(c), - Some(Err(_)) => return Err(sp_blockchain::Error::InvalidState.into()), - None => None, - }; - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); Ok(root) } } @@ -899,11 +800,6 @@ impl sc_client_api::backend::BlockImportOperation leaf_state: NewBlockState, ) -> ClientResult<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - if let Some(changes_trie_config_update) = - changes_tries_storage::extract_new_configuration(&header) - { - self.changes_trie_config_update = Some(changes_trie_config_update.clone()); - } self.pending_block = Some(PendingBlock { header, body, indexed_body, justifications, leaf_state }); Ok(()) @@ -930,15 +826,6 @@ impl sc_client_api::backend::BlockImportOperation Ok(root) } - fn update_changes_trie( - &mut self, - update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - self.changes_trie_updates = update.0; - self.changes_trie_build_cache_update = Some(update.1); - Ok(()) - } - fn insert_aux(&mut self, ops: I) -> ClientResult<()> where I: IntoIterator, Option>)>, @@ -1095,7 +982,6 @@ impl FrozenForDuration { pub struct Backend { storage: Arc>, offchain_storage: offchain::LocalStorage, - changes_tries_storage: DbChangesTrieStorage, blockchain: BlockchainDb, canonicalization_delay: u64, shared_cache: SharedCache, @@ -1155,7 +1041,6 @@ impl Backend { ) -> ClientResult { let is_archive_pruning = config.state_pruning.is_archive(); let blockchain = BlockchainDb::new(db.clone(), config.transaction_storage.clone())?; - let meta = blockchain.meta.clone(); let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e); let state_db: StateDb<_, _> = StateDb::new( config.state_pruning.clone(), @@ -1166,22 +1051,10 @@ impl Backend { let storage_db = StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; let offchain_storage = offchain::LocalStorage::new(db.clone()); - let changes_tries_storage = DbChangesTrieStorage::new( - db, - blockchain.header_metadata_cache.clone(), - columns::META, - columns::CHANGES_TRIE, - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta, - if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - )?; let backend = Backend { storage: Arc::new(storage_db), offchain_storage, - changes_tries_storage, blockchain, canonicalization_delay, shared_cache: new_shared_cache( @@ -1318,7 +1191,6 @@ impl Backend { header: &Block::Header, last_finalized: Option, justification: Option, - changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, ) -> ClientResult> { // TODO: ensure best chain contains this block. 
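
Editor's note: the hunks around this point remove the changes-trie cache-ops
accumulator that used to be threaded through finalization. A hedged sketch of the
resulting, slimmer call shape (bindings are illustrative; the real call site appears
in a later hunk of this file):

    let m = self.finalize_block_with_transaction(
        &mut transaction,
        &hash,
        &header,
        None,           // last_finalized
        justification,
        &mut displaced, // displaced leaves only; no changes-trie ops anymore
    )?;
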
@@ -1326,15 +1198,7 @@ impl Backend { self.ensure_sequential_finalization(header, last_finalized)?; let with_state = sc_client_api::Backend::have_state_at(self, &hash, number); - self.note_finalized( - transaction, - false, - header, - *hash, - changes_trie_cache_ops, - finalization_displaced, - with_state, - )?; + self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?; if let Some(justification) = justification { transaction.set_from_vec( @@ -1400,7 +1264,6 @@ impl Backend { (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap.clone()) }; - let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; @@ -1410,7 +1273,6 @@ impl Backend { &block_header, Some(last_finalized_hash), justification, - &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; @@ -1475,11 +1337,6 @@ impl Backend { ); transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - // for tests, because config is set from within the reset_storage - if operation.changes_trie_config_update.is_none() { - operation.changes_trie_config_update = Some(None); - } - if operation.commit_state { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); } else { @@ -1578,7 +1435,6 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); - let changes_trie_updates = operation.changes_trie_updates; debug!(target: "db", "DB Commit {:?} ({}), best={}, state={}, existing={}", hash, number, is_best, operation.commit_state, existing_header, @@ -1593,10 +1449,8 @@ impl Backend { self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; self.note_finalized( &mut transaction, - true, header, hash, - &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, operation.commit_state, )?; @@ -1606,21 +1460,6 @@ impl Backend { } if !existing_header { - let changes_trie_config_update = operation.changes_trie_config_update; - changes_trie_cache_ops = Some(self.changes_tries_storage.commit( - &mut transaction, - changes_trie_updates, - cache::ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - cache::ComplexBlockId::new(hash, number), - header, - finalized, - changes_trie_config_update, - changes_trie_cache_ops, - )?); - { let mut leaves = self.blockchain.leaves.write(); leaves.import(hash, number, parent_hash); @@ -1747,11 +1586,6 @@ impl Backend { ); } - if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { - self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); - } - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - if let Some((enacted, retracted)) = cache_update { self.shared_cache.write().sync(&enacted, &retracted); } @@ -1770,10 +1604,8 @@ impl Backend { fn note_finalized( &self, transaction: &mut Transaction, - is_inserted: bool, f_header: &Block::Header, f_hash: Block::Hash, - changes_trie_cache_ops: &mut Option>, displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { @@ -1798,18 +1630,6 @@ impl Backend { apply_state_commit(transaction, commit); } - if !f_num.is_zero() { - let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( - transaction, - *f_header.parent_hash(), - f_hash, - 
f_num, - if is_inserted { Some(&f_header) } else { None }, - changes_trie_cache_ops.take(), - )?; - *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); - } - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); self.prune_blocks(transaction, f_num, &new_displaced)?; match displaced { @@ -2036,9 +1856,6 @@ impl sc_client_api::backend::Backend for Backend { storage_updates: Default::default(), child_storage_updates: Default::default(), offchain_storage_updates: Default::default(), - changes_trie_config_update: None, - changes_trie_updates: MemoryDB::default(), - changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), set_head: None, @@ -2089,19 +1906,16 @@ impl sc_client_api::backend::Backend for Backend { let header = self.blockchain.expect_header(block)?; let mut displaced = None; - let mut changes_trie_cache_ops = None; let m = self.finalize_block_with_transaction( &mut transaction, &hash, &header, None, justification, - &mut changes_trie_cache_ops, &mut displaced, )?; self.storage.db.commit(transaction)?; self.blockchain.update_meta(m); - self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) } @@ -2148,10 +1962,6 @@ impl sc_client_api::backend::Backend for Backend { Ok(()) } - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - Some(&self.changes_tries_storage) - } - fn offchain_storage(&self) -> Option { Some(self.offchain_storage.clone()) } @@ -2208,7 +2018,6 @@ impl sc_client_api::backend::Backend for Backend { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); - let removed_number = best_number; let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( @@ -2241,10 +2050,6 @@ impl sc_client_api::backend::Backend for Backend { let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; - let changes_trie_cache_ops = self.changes_tries_storage.revert( - &mut transaction, - &cache::ComplexBlockId::new(removed.hash(), removed_number), - )?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2283,7 +2088,6 @@ impl sc_client_api::backend::Backend for Backend { best_hash, ); self.storage.db.commit(transaction)?; - self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(MetaUpdate { hash: best_hash, number: best_number, @@ -2345,11 +2149,6 @@ impl sc_client_api::backend::Backend for Backend { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - let changes_trie_cache_ops = self - .changes_tries_storage - .revert(&mut transaction, &cache::ComplexBlockId::new(*hash, hdr.number))?; - - self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); leaves.revert(hash.clone(), hdr.number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -2461,32 +2260,16 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - generic::DigestItem, testing::{Block as RawBlock, ExtrinsicWrapper, Header}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, }; - use sp_state_machine::{TrieDBMut, TrieMut}; const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; pub(crate) type Block = RawBlock>; - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { - let mut changes_root = 
-		let mut changes_trie_update = MemoryDB::<BlakeTwo256>::default();
-		{
-			let mut trie =
-				TrieDBMut::<BlakeTwo256>::new(&mut changes_trie_update, &mut changes_root);
-			for (key, value) in changes {
-				trie.insert(&key, &value).unwrap();
-			}
-		}
-
-		(changes_root, changes_trie_update)
-	}
-
 	pub fn insert_header(
 		backend: &Backend<Block>,
 		number: u64,
@@ -2501,20 +2284,14 @@ pub(crate) mod tests {
 		backend: &Backend<Block>,
 		number: u64,
 		parent_hash: H256,
-		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
+		_changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
 		extrinsics_root: H256,
 		body: Vec<ExtrinsicWrapper<u64>>,
 		transaction_index: Option<Vec<IndexOperation>>,
 	) -> H256 {
 		use sp_runtime::testing::Digest;

-		let mut digest = Digest::default();
-		let mut changes_trie_update = Default::default();
-		if let Some(changes) = changes {
-			let (root, update) = prepare_changes(changes);
-			digest.push(DigestItem::ChangesTrieRoot(root));
-			changes_trie_update = update;
-		}
+		let digest = Digest::default();
 		let header = Header {
 			number,
 			parent_hash,
@@ -2535,8 +2312,6 @@ pub(crate) mod tests {
 		if let Some(index) = transaction_index {
 			op.update_transaction_index(index).unwrap();
 		}
-		op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear))
-			.unwrap();
 		backend.commit_operation(op).unwrap();

 		header_hash
@@ -3241,38 +3016,6 @@ pub(crate) mod tests {
 		}
 	}

-	#[test]
-	fn header_cht_root_works() {
-		use sc_client_api::ProvideChtRoots;
-
-		let backend = Backend::<Block>::new_test(10, 10);
-
-		// insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created
-		let mut prev_hash =
-			insert_header(&backend, 0, Default::default(), None, Default::default());
-		let cht_size: u64 = cht::size();
-		for i in 1..1 + cht_size + cht_size + 1 {
-			prev_hash = insert_header(&backend, i, prev_hash, None, Default::default());
-		}
-
-		let blockchain = backend.blockchain();
-
-		let cht_root_1 = blockchain
-			.header_cht_root(cht_size, cht::start_number(cht_size, 0))
-			.unwrap()
-			.unwrap();
-		let cht_root_2 = blockchain
-			.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2)
-			.unwrap()
-			.unwrap();
-		let cht_root_3 = blockchain
-			.header_cht_root(cht_size, cht::end_number(cht_size, 0))
-			.unwrap()
-			.unwrap();
-		assert_eq!(cht_root_1, cht_root_2);
-		assert_eq!(cht_root_2, cht_root_3);
-	}
-
 	#[test]
 	fn prune_blocks_on_finalize() {
 		for storage in &[TransactionStorageMode::BlockBody, TransactionStorageMode::StorageChain] {
diff --git a/client/db/src/light.rs b/client/db/src/light.rs
deleted file mode 100644
index 48cf0489cf2a0..0000000000000
--- a/client/db/src/light.rs
+++ /dev/null
@@ -1,1329 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-//! RocksDB-based light client blockchain storage.
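
The storage being deleted below kept full headers only for a recent window: once a range of blocks was finalized, their headers were pruned and replaced by a single Canonical Hash Trie (CHT) root per range (see `note_finalized` further down). A minimal sketch of that pruning idea, using a toy hasher in place of the real `cht::compute_root` trie construction; `toy_cht_root` and `prune_into_cht` are illustrative names, not part of the deleted API:

use std::collections::BTreeMap;

/// Toy stand-in for a CHT root: just a hash over the (number, hash) pairs of one
/// completed range. The real code builds a trie root via `cht::compute_root`.
fn toy_cht_root(range: &BTreeMap<u64, [u8; 32]>) -> u64 {
	use std::hash::{Hash, Hasher};
	let mut h = std::collections::hash_map::DefaultHasher::new();
	for (number, hash) in range {
		number.hash(&mut h);
		hash.hash(&mut h);
	}
	h.finish()
}

/// Replace a finalized range of headers with its CHT root, mirroring how
/// `note_finalized` prunes headers once `cht::is_build_required` fires.
fn prune_into_cht(
	headers: &mut BTreeMap<u64, [u8; 32]>,
	chts: &mut BTreeMap<u64, u64>,
	cht_start: u64,
	cht_size: u64,
) {
	let range: BTreeMap<u64, [u8; 32]> = (cht_start..cht_start + cht_size)
		.filter_map(|n| headers.remove(&n).map(|h| (n, h)))
		.collect();
	chts.insert(cht_start, toy_cht_root(&range));
}

A light client can then verify a header of a pruned block against the stored CHT root instead of keeping every header around.
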
-
-use parking_lot::RwLock;
-use std::{collections::HashMap, convert::TryInto, sync::Arc};
-
-use crate::{
-	cache::{ComplexBlockId, DbCache, DbCacheSync, EntryType as CacheEntryType},
-	utils::{self, block_id_to_lookup_key, meta_keys, read_db, read_meta, DatabaseType, Meta},
-	DatabaseSettings, DbHash, FrozenForDuration,
-};
-use codec::{Decode, Encode};
-use log::{debug, trace, warn};
-use sc_client_api::{
-	backend::{AuxStore, NewBlockState, ProvideChtRoots},
-	blockchain::{BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo},
-	cht, Storage, UsageInfo,
-};
-use sp_blockchain::{
-	well_known_cache_keys, CachedHeaderMetadata, Error as ClientError,
-	HeaderBackend as BlockchainHeaderBackend, HeaderMetadata, HeaderMetadataCache,
-	Result as ClientResult,
-};
-use sp_database::{Database, Transaction};
-use sp_runtime::{
-	generic::{BlockId, DigestItem},
-	traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, Zero},
-};
-
-pub(crate) mod columns {
-	pub const META: u32 = crate::utils::COLUMN_META;
-	pub const KEY_LOOKUP: u32 = 1;
-	pub const HEADER: u32 = 2;
-	pub const CACHE: u32 = 3;
-	pub const CHT: u32 = 4;
-	pub const AUX: u32 = 5;
-}
-
-/// Prefix for headers CHT.
-const HEADER_CHT_PREFIX: u8 = 0;
-/// Prefix for changes tries roots CHT.
-const CHANGES_TRIE_CHT_PREFIX: u8 = 1;
-
-/// Light blockchain storage. Stores most recent headers + CHTs for older headers.
-/// Locks order: meta, cache.
-pub struct LightStorage<Block: BlockT> {
-	db: Arc<dyn Database<DbHash>>,
-	meta: RwLock<Meta<NumberFor<Block>, Block::Hash>>,
-	cache: Arc<DbCacheSync<Block>>,
-	header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
-	io_stats: FrozenForDuration<kvdb::IoStats>,
-}
-
-impl<Block: BlockT> LightStorage<Block> {
-	/// Create new storage with given settings.
-	pub fn new(config: DatabaseSettings) -> ClientResult<Self> {
-		let db = crate::utils::open_database::<Block>(&config, DatabaseType::Light)?;
-		Self::from_kvdb(db as Arc<_>)
-	}
-
-	/// Create new memory-backed `LightStorage` for tests.
- #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test() -> Self { - let db = Arc::new(sp_database::MemDb::default()); - Self::from_kvdb(db as Arc<_>).expect("failed to create test-db") - } - - fn from_kvdb(db: Arc>) -> ClientResult { - let meta = read_meta::(&*db, columns::HEADER)?; - let header_metadata_cache = Arc::new(HeaderMetadataCache::default()); - let cache = DbCache::new( - db.clone(), - header_metadata_cache.clone(), - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta.genesis_hash, - ComplexBlockId::new(meta.finalized_hash, meta.finalized_number), - ); - - Ok(LightStorage { - db, - meta: RwLock::new(meta), - cache: Arc::new(DbCacheSync(RwLock::new(cache))), - header_metadata_cache, - io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - }) - } - - #[cfg(test)] - pub(crate) fn cache(&self) -> &DbCacheSync { - &self.cache - } - - fn update_meta( - &self, - hash: Block::Hash, - number: NumberFor, - is_best: bool, - is_finalized: bool, - ) { - let mut meta = self.meta.write(); - - if number.is_zero() { - meta.genesis_hash = hash; - meta.finalized_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } -} - -impl BlockchainHeaderBackend for LightStorage -where - Block: BlockT, -{ - fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } - - fn info(&self) -> BlockchainInfo { - let meta = self.meta.read(); - BlockchainInfo { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash.clone(), - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - finalized_state: if meta.finalized_hash != Default::default() { - Some((meta.genesis_hash, Zero::zero())) - } else { - None - }, - number_leaves: 1, - block_gap: None, - } - } - - fn status(&self, id: BlockId) -> ClientResult { - let exists = match id { - BlockId::Hash(_) => - read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some(), - BlockId::Number(n) => n <= self.meta.read().best_number, - }; - match exists { - true => Ok(BlockStatus::InChain), - false => Ok(BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(lookup_key) = - block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? - { - let number = utils::lookup_key_to_number(&lookup_key)?; - Ok(Some(number)) - } else { - Ok(None) - } - } - - fn hash(&self, number: NumberFor) -> ClientResult> { - Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone())) - } -} - -impl HeaderMetadata for LightStorage { - type Error = ClientError; - - fn header_metadata( - &self, - hash: Block::Hash, - ) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else( - || { - self.header(BlockId::hash(hash))? 
- .map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache - .insert_header_metadata(header_metadata.hash, header_metadata.clone()); - header_metadata - }) - .ok_or_else(|| { - ClientError::UnknownBlock(format!("header not found in db: {}", hash)) - }) - }, - Ok, - ) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.header_metadata_cache.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.header_metadata_cache.remove_header_metadata(hash); - } -} - -impl LightStorage { - // Get block changes trie root, if available. - fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block).map(|header| { - header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) - }) - } - - /// Handle setting head within a transaction. `route_to` should be the last - /// block that existed in the database. `best_to` should be the best block - /// to be set. - /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction( - &self, - transaction: &mut Transaction, - route_to: Block::Hash, - best_to: (NumberFor, Block::Hash), - ) -> ClientResult<()> { - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; - - // handle reorg. - let meta = self.meta.read(); - if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route(self, meta.best_hash, route_to)?; - - // update block number to hash lookup entries. - for retracted in tree_route.retracted() { - if retracted.hash == meta.finalized_hash { - // TODO: can we recover here? - warn!( - "Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash) - ); - } - - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - retracted.number, - )?; - } - - for enacted in tree_route.enacted() { - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - enacted.number, - enacted.hash, - )?; - } - } - - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - )?; - - Ok(()) - } - - // Note that a block is finalized. Only call with child of last finalized block. 
- fn note_finalized( - &self, - transaction: &mut Transaction, - header: &Block::Header, - hash: Block::Hash, - ) -> ClientResult<()> { - let meta = self.meta.read(); - if &meta.finalized_hash != header.parent_hash() { - return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( - "Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash - )) - .into()) - } - - let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - // build new CHT(s) if required - if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) { - let new_cht_start: NumberFor = cht::start_number(cht::size(), new_cht_number); - - let mut current_num = new_cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - let new_header_cht_root = cht::compute_root::, _>( - cht::size(), - new_cht_number, - cht_range.map(|num| self.hash(num)), - )?; - transaction.set( - columns::CHT, - &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, - new_header_cht_root.as_ref(), - ); - - // if the header includes changes trie root, let's build a changes tries roots CHT - if header.digest().log(DigestItem::as_changes_trie_root).is_some() { - let mut current_num = new_cht_start; - let cht_range = std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let new_changes_trie_cht_root = - cht::compute_root::, _>( - cht::size(), - new_cht_number, - cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - )?; - transaction.set( - columns::CHT, - &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, - new_changes_trie_cht_root.as_ref(), - ); - } - - // prune headers that are replaced with CHT - let mut prune_block = new_cht_start; - let new_cht_end = cht::end_number(cht::size(), new_cht_number); - trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", - new_cht_start, new_cht_end, new_cht_number); - - while prune_block <= new_cht_end { - if let Some(hash) = self.hash(prune_block)? { - let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? - .expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. q.e.d."); - utils::remove_key_mappings( - transaction, - columns::KEY_LOOKUP, - prune_block, - hash, - )?; - transaction.remove(columns::HEADER, &lookup_key); - } - prune_block += One::one(); - } - } - - Ok(()) - } - - /// Read CHT root of given type for the block. - fn read_cht_root( - &self, - cht_type: u8, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); - - let meta = self.meta.read(); - let max_cht_number = cht::max_cht_number(cht_size, meta.finalized_number); - let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; - match max_cht_number { - Some(max_cht_number) if cht_number <= max_cht_number => (), - _ => return Ok(None), - } - - let cht_start = cht::start_number(cht_size, cht_number); - self.db - .get(columns::CHT, &cht_key(cht_type, cht_start)?) 
- .ok_or_else(no_cht_for_block) - .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) - .map(Some) - } -} - -impl AuxStore for LightStorage -where - Block: BlockT, -{ - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - for (k, v) in insert { - transaction.set(columns::AUX, k, v); - } - for k in delete { - transaction.remove(columns::AUX, k); - } - self.db.commit(transaction)?; - - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.db.get(columns::AUX, key)) - } -} - -impl Storage for LightStorage -where - Block: BlockT, -{ - fn import_header( - &self, - header: Block::Header, - mut cache_at: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - - let hash = header.hash(); - let number = *header.number(); - let parent_hash = *header.parent_hash(); - - for (key, maybe_val) in aux_ops { - match maybe_val { - Some(val) => transaction.set_from_vec(columns::AUX, &key, val), - None => transaction.remove(columns::AUX, &key), - } - } - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash)?; - - if leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; - } - - utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; - transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); - - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache - .insert_header_metadata(header.hash().clone(), header_metadata); - - let is_genesis = number.is_zero(); - if is_genesis { - self.cache.0.write().set_genesis_hash(hash); - transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - } - - let finalized = match leaf_state { - _ if is_genesis => true, - NewBlockState::Final => true, - _ => false, - }; - - if finalized { - self.note_finalized(&mut transaction, &header, hash)?; - } - - // update changes trie configuration cache - if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { - if let Some(new_configuration) = - crate::changes_tries_storage::extract_new_configuration(&header) - { - cache_at - .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - } - } - - { - let mut cache = self.cache.0.write(); - let cache_ops = cache - .transaction(&mut transaction) - .on_block_insert( - ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - ComplexBlockId::new(hash, number), - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(); - - debug!("Light DB Commit {:?} ({})", hash, number); - - self.db.commit(transaction)?; - cache.commit(cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there are cache_ops; qed", - ); - } - - self.update_meta(hash, number, leaf_state.is_best(), finalized); - - Ok(()) - } - - fn set_head(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? 
{ - let hash = header.hash(); - let number = header.number(); - - let mut transaction = Transaction::new(); - self.set_head_with_transaction( - &mut transaction, - hash.clone(), - (number.clone(), hash.clone()), - )?; - self.db.commit(transaction)?; - self.update_meta(hash, header.number().clone(), true, false); - - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot set head {:?}", id))) - } - } - - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let mut transaction = Transaction::new(); - let hash = header.hash(); - let number = *header.number(); - self.note_finalized(&mut transaction, &header, hash.clone())?; - { - let mut cache = self.cache.0.write(); - let cache_ops = cache - .transaction(&mut transaction) - .on_block_finalize( - ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - ComplexBlockId::new(hash, number), - )? - .into_ops(); - - self.db.commit(transaction)?; - cache.commit(cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there are cache_ops; qed", - ); - } - self.update_meta(hash, header.number().clone(), false, true); - - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot finalize block {:?}", id))) - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - Some(self.cache.clone()) - } - - fn usage_info(&self) -> Option { - use sc_client_api::{IoInfo, MemoryInfo, MemorySize}; - - // TODO: reimplement IO stats - let database_cache = MemorySize::from_bytes(0); - let io_stats = self.io_stats.take_or_else(|| kvdb::IoStats::empty()); - - Some(UsageInfo { - memory: MemoryInfo { - database_cache, - state_cache: Default::default(), - state_db: Default::default(), - }, - io: IoInfo { - transactions: io_stats.transactions, - bytes_read: io_stats.bytes_read, - bytes_written: io_stats.bytes_written, - writes: io_stats.writes, - reads: io_stats.reads, - average_transaction_size: io_stats.avg_transaction_size() as u64, - // Light client does not track those - state_reads: 0, - state_writes: 0, - state_reads_cache: 0, - state_writes_cache: 0, - state_writes_nodes: 0, - }, - }) - } -} - -impl ProvideChtRoots for LightStorage -where - Block: BlockT, -{ - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } -} - -/// Build the key for inserting header-CHT at given block. 
-fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { - let mut key = [cht_type; 5]; - key[1..].copy_from_slice(&utils::number_index_key(block)?); - Ok(key) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use sc_client_api::cht; - use sp_blockchain::{lowest_common_ancestor, tree_route}; - use sp_core::ChangesTrieConfiguration; - use sp_runtime::{ - generic::{ChangesTrieSignal, DigestItem}, - testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash}, - }; - - type Block = RawBlock>; - type AuthorityId = sp_core::ed25519::Public; - - pub fn default_header(parent: &Hash, number: u64) -> Header { - Header { - number: number.into(), - parent_hash: *parent, - state_root: Hash::random(), - digest: Default::default(), - extrinsics_root: Default::default(), - } - } - - fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { - let mut header = default_header(parent, number); - header - .digest - .logs - .push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); - header - } - - fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { - let mut header = default_header(parent, number); - header.extrinsics_root = extrinsics_root; - header - } - - pub fn insert_block Header>( - db: &LightStorage, - cache: HashMap>, - mut header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Best, Vec::new()).unwrap(); - hash - } - - fn insert_final_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Final, Vec::new()).unwrap(); - hash - } - - fn insert_non_best_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Normal, Vec::new()).unwrap(); - hash - } - - #[test] - fn returns_known_header() { - let db = LightStorage::new_test(); - let known_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); - let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); - assert_eq!(header_by_hash, header_by_number); - } - - #[test] - fn does_not_return_unknown_header() { - let db = LightStorage::::new_test(); - assert!(db.header(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap().is_none()); - assert!(db.header(BlockId::Number(0)).unwrap().is_none()); - } - - #[test] - fn returns_info() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let info = db.info(); - assert_eq!(info.best_hash, genesis_hash); - assert_eq!(info.best_number, 0); - assert_eq!(info.genesis_hash, genesis_hash); - let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - let info = db.info(); - assert_eq!(info.best_hash, best_hash); - assert_eq!(info.best_number, 1); - assert_eq!(info.genesis_hash, genesis_hash); - } - - #[test] - fn returns_block_status() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!( - 
db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), - BlockStatus::Unknown - ); - assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); - } - - #[test] - fn returns_block_hash() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); - assert_eq!(db.hash(1).unwrap(), None); - } - - #[test] - fn import_header_works() { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(raw_db.count(columns::HEADER), 1); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); - - let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - assert_eq!(raw_db.count(columns::HEADER), 2); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 4); - } - - #[test] - fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>( - header_producer: F, - ) -> (Arc, LightStorage) { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - - // insert genesis block header (never pruned) - let mut prev_hash = - insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); - - // insert SIZE blocks && ensure that nothing is pruned - - for number in 0..cht::size() { - prev_hash = - insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert next SIZE blocks && ensure that nothing is pruned - for number in 0..(cht_size as _) { - prev_hash = insert_block(&db, HashMap::new(), || { - header_producer(&prev_hash, 1 + cht_size + number) - }); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of - // this CHT are pruned nothing is yet finalized, so nothing is pruned. - prev_hash = insert_block(&db, HashMap::new(), || { - header_producer(&prev_hash, 1 + cht_size + cht_size) - }); - assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // now finalize the block. 
- for i in (0..(ucht_size + ucht_size)).map(|i| i + 1) { - db.finalize_header(BlockId::Number(i as _)).unwrap(); - } - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - (raw_db, db) - } - - // when headers are created without changes tries roots - let (raw_db, db) = insert_headers(default_header); - let cht_size: u64 = cht::size(); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), (2 * (1 + cht_size + 1)) as usize); - assert_eq!(raw_db.count(columns::CHT), 1); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).is_err()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - - // when headers are created with changes tries roots - let (raw_db, db) = insert_headers(header_with_changes_trie); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::CHT), 2); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - } - - #[test] - fn get_cht_fails_for_genesis_block() { - assert!(LightStorage::::new_test().header_cht_root(cht::size(), 0).is_err()); - } - - #[test] - fn get_cht_fails_for_non_existent_cht() { - let cht_size: u64 = cht::size(); - assert!(LightStorage::::new_test() - .header_cht_root(cht_size, cht_size / 2) - .unwrap() - .is_none()); - } - - #[test] - fn get_cht_works() { - let db = LightStorage::new_test(); - - // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || { - header_with_changes_trie(&Default::default(), 0) - }); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - for i in 1..1 + ucht_size + ucht_size + 1 { - prev_hash = insert_block(&db, HashMap::new(), || { - header_with_changes_trie(&prev_hash, i as u64) - }); - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - } - - let cht_root_1 = - db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db - .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap() - .unwrap(); - let cht_root_3 = - db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - - let cht_root_1 = db - .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)) - .unwrap() - .unwrap(); - let cht_root_2 = db - .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap() - .unwrap(); - let cht_root_3 = db - .changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)) - .unwrap() - .unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - } - - #[test] - fn tree_route_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. 
- let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. - let b1 = insert_block(&db, HashMap::new(), || { - header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) - }); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let tree_route = tree_route(&db, a3, b2).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2, a1] - ); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![b1, b2] - ); - } - - { - let tree_route = tree_route(&db, a1, a3).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![a2, a3] - ); - } - - { - let tree_route = tree_route(&db, a3, a1).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2] - ); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = tree_route(&db, a2, a2).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn lowest_common_ancestor_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. - let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. 
- let b1 = insert_block(&db, HashMap::new(), || { - header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) - }); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let lca = lowest_common_ancestor(&db, a3, b2).unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor(&db, a1, a3).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a3, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a3).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - - { - let lca = lowest_common_ancestor(&db, a2, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a2).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - } - - #[test] - fn authorities_are_cached() { - let db = LightStorage::new_test(); - - fn run_checks( - db: &LightStorage, - max: u64, - checks: &[(u64, Option>)], - ) { - for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { - let actual = authorities(db.cache(), BlockId::Number(*at)); - assert_eq!(*expected, actual); - } - } - - fn same_authorities() -> HashMap> { - HashMap::new() - } - - fn make_authorities( - authorities: Vec, - ) -> HashMap> { - let mut map = HashMap::new(); - map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - map - } - - fn authorities( - cache: &dyn BlockchainCache, - at: BlockId, - ) -> Option> { - cache - .get_at(&well_known_cache_keys::AUTHORITIES, &at) - .unwrap_or(None) - .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) - } - - let auth1 = || AuthorityId::from_raw([1u8; 32]); - let auth2 = || AuthorityId::from_raw([2u8; 32]); - let auth3 = || AuthorityId::from_raw([3u8; 32]); - let auth4 = || AuthorityId::from_raw([4u8; 32]); - let auth5 = || AuthorityId::from_raw([5u8; 32]); - let auth6 = || AuthorityId::from_raw([6u8; 32]); - - let (hash2, hash6) = { - // first few blocks are instantly finalized - // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) - let checks = vec![ - (0, None), - (1, None), - (2, Some(vec![auth1()])), - (3, Some(vec![auth1()])), - (4, Some(vec![auth1(), auth2()])), - (5, Some(vec![auth1(), auth2()])), - (6, Some(vec![auth1(), auth2()])), - ]; - - let hash0 = insert_final_block(&db, same_authorities(), || { - default_header(&Default::default(), 0) - }); - run_checks(&db, 0, &checks); - let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); - run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash1, 2) - }); - run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash2, 3) - }); - run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash3, 4) - }); - run_checks(&db, 4, &checks); - let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash4, 5) - }); - run_checks(&db, 5, &checks); - let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); - run_checks(&db, 6, &checks); - - (hash2, hash6) - }; - - { - // some older non-best blocks are inserted - // ... 
-> B2(1) -> B2_1(1) -> B2_2(2) - // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash2, 3) - }); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = - insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash2_1, 4) - }); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); - } - - let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { - // inserting non-finalized blocks - // B6(None) -> B7(3) -> B8(3) - // \> B6_1(4) -> B6_2(4) - // \> B6_1_1(5) - // \> B6_1_2(6) -> B6_1_3(7) - - let hash7 = - insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = - insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = - insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { - default_header(&hash6_1, 8) - }); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { - default_header(&hash6_1, 8) - }); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = - insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - 
assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - - (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) - }; - - { - // finalize block hash6_1 - db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - // finalize block hash6_2 - db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - } - } - - #[test] - fn database_is_reopened() { - let db = LightStorage::new_test(); - let hash0 = - insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); - - let db = db.db; - let db = LightStorage::from_kvdb(db).unwrap(); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash::(hash0)).unwrap().unwrap().hash(), hash0); - } - - #[test] - fn aux_store_works() { - let db = LightStorage::::new_test(); - - // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()) - .unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), None); - - // delete aux1 + insert aux3 using import operation - db.import_header( - default_header(&Default::default(), 0), - HashMap::new(), - NewBlockState::Best, - vec![(vec![3], Some(vec![103])), (vec![1], None)], - ) - .unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), None); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103])); - } - - #[test] - fn cache_can_be_initialized_after_genesis_inserted() { - let (genesis_hash, storage) = { - let db = LightStorage::::new_test(); - - // before cache is initialized => Err - assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); - - // insert genesis block (no value for cache is provided) - let mut genesis_hash = None; - insert_block(&db, HashMap::new(), || { - let header = default_header(&Default::default(), 0); - genesis_hash = Some(header.hash()); - header - }); - - // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), None); - - // initialize cache - db.cache().initialize(b"test", vec![42]).unwrap(); - - // after genesis is inserted + cache is initialized => Some - assert_eq!( - 
db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(),
-				Some(((0, genesis_hash.unwrap()), None, vec![42])),
-			);
-
-			(genesis_hash, db.db)
-		};
-
-		// restart && check that after restart value is read from the cache
-		let db =
-			LightStorage::<Block>::from_kvdb(storage as Arc<_>).expect("failed to create test-db");
-		assert_eq!(
-			db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(),
-			Some(((0, genesis_hash.unwrap()), None, vec![42])),
-		);
-	}
-
-	#[test]
-	fn changes_trie_configuration_is_tracked_on_light_client() {
-		let db = LightStorage::<Block>::new_test();
-
-		let new_config = Some(ChangesTrieConfiguration::new(2, 2));
-
-		// insert block#0 && block#1 (no value for cache is provided)
-		let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0));
-		assert_eq!(
-			db.cache()
-				.get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0))
-				.unwrap()
-				.map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()),
-			None,
-		);
-
-		// insert configuration at block#1 (starts from block#2)
-		insert_block(&db, HashMap::new(), || {
-			let mut header = default_header(&hash0, 1);
-			header.digest_mut().push(DigestItem::ChangesTrieSignal(
-				ChangesTrieSignal::NewConfiguration(new_config.clone()),
-			));
-			header
-		});
-		assert_eq!(
-			db.cache()
-				.get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1))
-				.unwrap()
-				.map(|(_, _, v)| Option::<ChangesTrieConfiguration>::decode(&mut &v[..]).unwrap()),
-			Some(new_config),
-		);
-	}
-}
diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs
index 1b645ca9fb2b9..61c0b94dc701c 100644
--- a/client/db/src/parity_db.rs
+++ b/client/db/src/parity_db.rs
@@ -16,7 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 use crate::{
-	columns, light,
+	columns,
 	utils::{DatabaseType, NUM_COLUMNS},
 };
 /// A `Database` adapter for parity-db.
@@ -61,10 +61,6 @@ pub fn open<H: Clone + AsRef<[u8]>>(
 			state_col.preimage = true;
 			state_col.uniform = true;
 		},
-		DatabaseType::Light => {
-			config.columns[light::columns::HEADER as usize].compression =
-				parity_db::CompressionType::Lz4;
-		},
 	}

 	let db = if create {
diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs
index 0e895eaaf3851..b098a7864bafb 100644
--- a/client/db/src/utils.rs
+++ b/client/db/src/utils.rs
@@ -56,10 +56,6 @@ pub mod meta_keys {
 	pub const FINALIZED_STATE: &[u8; 6] = b"fstate";
 	/// Block gap.
 	pub const BLOCK_GAP: &[u8; 3] = b"gap";
-	/// Meta information prefix for list-based caches.
-	pub const CACHE_META_PREFIX: &[u8; 5] = b"cache";
-	/// Meta information for changes tries key.
-	pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie";
 	/// Genesis block hash.
 	pub const GENESIS_HASH: &[u8; 3] = b"gen";
 	/// Leaves prefix list key.
@@ -95,8 +91,6 @@ pub type NumberIndexKey = [u8; 4];
 pub enum DatabaseType {
 	/// Full node database.
 	Full,
-	/// Light node database.
-	Light,
 }

 /// Convert block number into short lookup key (LE representation) for
@@ -124,19 +118,6 @@ where
 	Ok(lookup_key)
 }

-/// Convert block lookup key into block number.
-/// all block lookup keys start with the block number.
-pub fn lookup_key_to_number<N>(key: &[u8]) -> sp_blockchain::Result<N>
-where
-	N: From<u32>,
-{
-	if key.len() < 4 {
-		return Err(sp_blockchain::Error::Backend("Invalid block key".into()))
-	}
-	Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32))
-		.map(Into::into)
-}
-
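
The helper removed above is the inverse of `number_index_key`: despite the "LE representation" wording in the neighbouring doc comment, the number is packed big-endian, most significant byte first, so that keys iterate in block-number order. A self-contained sketch of the round trip over plain `u32` (the real code is generic over `NumberFor<Block>` and returns errors instead of `Option`):

/// Pack a block number into the 4-byte big-endian prefix used by lookup keys.
fn number_index_key(n: u32) -> [u8; 4] {
	n.to_be_bytes()
}

/// Recover the number from a key, as the deleted `lookup_key_to_number` did.
fn lookup_key_to_number(key: &[u8]) -> Option<u32> {
	if key.len() < 4 {
		return None
	}
	Some((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32))
}

fn main() {
	let key = number_index_key(0x00AB_CDEF);
	assert_eq!(&key, &[0x00, 0xAB, 0xCD, 0xEF]);
	assert_eq!(lookup_key_to_number(&key), Some(0x00AB_CDEF));
	// Big-endian packing keeps database iteration in block-number order.
	assert!(number_index_key(1) < number_index_key(256));
}
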
 /// Delete number to hash mapping in DB transaction.
 pub fn remove_number_to_key_mapping<N: TryInto<u32>>(
 	transaction: &mut Transaction<DbHash>,
@@ -147,18 +128,6 @@ pub fn remove_number_to_key_mapping<N: TryInto<u32>>(
 	Ok(())
 }

-/// Remove key mappings.
-pub fn remove_key_mappings<N: TryInto<u32>, H: AsRef<[u8]>>(
-	transaction: &mut Transaction<DbHash>,
-	key_lookup_col: u32,
-	number: N,
-	hash: H,
-) -> sp_blockchain::Result<()> {
-	remove_number_to_key_mapping(transaction, key_lookup_col, number)?;
-	transaction.remove(key_lookup_col, hash.as_ref());
-	Ok(())
-}
-
 /// Place a number mapping into the database. This maps number to current perceived
 /// block hash at that position.
 pub fn insert_number_to_key_mapping<N: TryInto<u32> + Clone, H: AsRef<[u8]>>(
@@ -357,18 +326,6 @@ fn open_kvdb_rocksdb<Block: BlockT>(
 				other_col_budget,
 			);
 		},
-		DatabaseType::Light => {
-			let col_budget = cache_size / (NUM_COLUMNS as usize);
-			for i in 0..NUM_COLUMNS {
-				memory_budget.insert(i, col_budget);
-			}
-			log::trace!(
-				target: "db",
-				"Open RocksDB light database at {:?}, column cache: {} MiB",
-				path,
-				col_budget,
-			);
-		},
 	}

 	db_config.memory_budget = memory_budget;
@@ -424,8 +381,7 @@ fn maybe_migrate_to_type_subdir<Block: BlockT>(
 	// See if there's a file identifying a rocksdb or paritydb folder in the parent dir and
 	// the target path ends in a role specific directory
 	if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) &&
-		(p.ends_with(DatabaseType::Full.as_str()) ||
-			p.ends_with(DatabaseType::Light.as_str()))
+		(p.ends_with(DatabaseType::Full.as_str()))
 	{
 		// Try to open the database to check if the current `DatabaseType` matches the type of
 		// database stored in the target directory and close the database on success.
@@ -501,18 +457,6 @@ pub fn read_header<Block: BlockT>(
 	}
 }

-/// Required header from the database.
-pub fn require_header<Block: BlockT>(
-	db: &dyn Database<DbHash>,
-	col_index: u32,
-	col: u32,
-	id: BlockId<Block>,
-) -> sp_blockchain::Result<Block::Header> {
-	read_header(db, col_index, col, id).and_then(|header| {
-		header.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id)))
-	})
-}
-
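
`require_header`, removed above, was a thin "read or fail" wrapper: the same lookup as `read_header`, but a missing header becomes a typed `UnknownBlock` error instead of `None`. The pattern, sketched with hypothetical stand-in types (a `HashMap` plays the database; `Error` mirrors the shape of `sp_blockchain::Error`):

use std::collections::HashMap;

/// Hypothetical stand-in for the crate's error type, for this sketch only.
#[derive(Debug)]
enum Error {
	UnknownBlock(String),
}

/// Optional lookup: absence is not an error.
fn read_header(db: &HashMap<u64, String>, number: u64) -> Result<Option<String>, Error> {
	Ok(db.get(&number).cloned())
}

/// The "require" variant: same lookup, but absence becomes a typed error.
fn require_header(db: &HashMap<u64, String>, number: u64) -> Result<String, Error> {
	read_header(db, number)?
		.ok_or_else(|| Error::UnknownBlock(format!("Require header: {}", number)))
}

fn main() {
	let mut db = HashMap::new();
	db.insert(0, "genesis".to_string());
	assert!(require_header(&db, 0).is_ok());
	assert!(matches!(require_header(&db, 1), Err(Error::UnknownBlock(_))));
}
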
 /// Read meta from the database.
 pub fn read_meta<Block: BlockT>(
 	db: &dyn Database<DbHash>,
@@ -598,7 +542,6 @@ impl DatabaseType {
 	pub fn as_str(&self) -> &'static str {
 		match *self {
 			DatabaseType::Full => "full",
-			DatabaseType::Light => "light",
 		}
 	}
 }
@@ -669,23 +612,12 @@ mod tests {
 			assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists());
 		}

-		check_dir_for_db_type(
-			DatabaseType::Light,
-			DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 },
-			"db_version",
-		);
 		check_dir_for_db_type(
 			DatabaseType::Full,
 			DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 },
 			"db_version",
 		);

-		#[cfg(feature = "with-parity-db")]
-		check_dir_for_db_type(
-			DatabaseType::Light,
-			DatabaseSource::ParityDb { path: PathBuf::new() },
-			"metadata",
-		);
 		#[cfg(feature = "with-parity-db")]
 		check_dir_for_db_type(
 			DatabaseType::Full,
@@ -709,16 +641,8 @@ mod tests {
 			assert!(!old_db_path.join("light/db_version").exists());
 			assert!(!old_db_path.join("full/db_version").exists());
 		}
-		let source = DatabaseSource::RocksDb {
-			path: old_db_path.join(DatabaseType::Light.as_str()),
-			cache_size: 128,
-		};
-		let settings = db_settings(source);
-		let db_res = open_database::<Block>(&settings, DatabaseType::Light);
-		assert!(db_res.is_err(), "Opening a light database in full role should fail");
 		// assert nothing was changed
 		assert!(old_db_path.join("db_version").exists());
-		assert!(!old_db_path.join("light/db_version").exists());
 		assert!(!old_db_path.join("full/db_version").exists());
 	}
 }
@@ -735,7 +659,6 @@ mod tests {
 	#[test]
 	fn database_type_as_str_works() {
 		assert_eq!(DatabaseType::Full.as_str(), "full");
-		assert_eq!(DatabaseType::Light.as_str(), "light");
 	}

 	#[test]
diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs
index 1cded769c6856..7aa02a61dba11 100644
--- a/client/executor/src/integration_tests/mod.rs
+++ b/client/executor/src/integration_tests/mod.rs
@@ -40,7 +40,7 @@ use tracing_subscriber::layer::SubscriberExt;

 use crate::WasmExecutionMethod;

-pub type TestExternalities = CoreTestExternalities<BlakeTwo256, u64>;
+pub type TestExternalities = CoreTestExternalities<BlakeTwo256>;
 type HostFunctions = sp_io::SubstrateHostFunctions;

 /// Simple macro that runs a given method as test with the available wasm execution methods.
diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d54f7234b44b4..000f7397ac9d9 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -19,7 +19,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::Decode; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_consensus::{ @@ -35,7 +35,7 @@ use sp_core::hashing::twox_128; use sp_finality_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, }; @@ -89,7 +89,6 @@ impl JustificationImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, SC: SelectChain, @@ -229,7 +228,6 @@ pub fn find_forced_change( impl GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, @@ -515,7 +513,6 @@ where impl BlockImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 452659ced6a70..e7618929fd089 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -58,7 +58,7 @@ use futures::{prelude::*, StreamExt}; use log::{debug, error, info}; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::Decode; use parking_lot::RwLock; use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ @@ -77,7 +77,7 @@ use sp_core::crypto::Public; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, DigestFor, NumberFor, Zero}, + traits::{Block as BlockT, NumberFor, Zero}, }; pub use finality_grandpa::BlockNumberOps; @@ -718,7 +718,6 @@ where SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, - DigestFor: Encode, C: ClientForGrandpa + 'static, C::Api: GrandpaApi, { diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 1aef7cd1b017a..8439bf6963d01 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -43,6 +43,7 @@ use sp_finality_grandpa::{ use sp_keyring::Ed25519Keyring; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ + codec::Encode, generic::{BlockId, DigestItem}, traits::{Block as BlockT, Header as HeaderT}, Justifications, @@ -139,26 +140,16 @@ impl TestNetFactory for GrandpaTestNet { &self, client: PeersClient, ) -> (BlockImportAdapter, Option>, PeerData) { - match client { - PeersClient::Full(ref client, ref backend) => { - let (import, link) = block_import( - client.clone(), - &self.test_config, - LongestChain::new(backend.clone()), - None, - ) - .expect("Could not create block import for fresh peer."); - let justification_import = Box::new(import.clone()); - ( - BlockImportAdapter::new(import), - Some(justification_import), - Mutex::new(Some(link)), - ) - }, - PeersClient::Light(..) 
=> { - panic!("Light client is not used in tests."); - }, - } + let (client, backend) = (client.as_client(), client.as_backend()); + let (import, link) = block_import( + client.clone(), + &self.test_config, + LongestChain::new(backend.clone()), + None, + ) + .expect("Could not create block import for fresh peer."); + let justification_import = Box::new(import.clone()); + (BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link))) } fn peer(&mut self, i: usize) -> &mut GrandpaPeer { @@ -466,7 +457,7 @@ fn finalize_3_voters_1_full_observer() { // all peers should have stored the justification for the best finalized block #20 for peer_id in 0..4 { - let client = net.lock().peers[peer_id].client().as_full().unwrap(); + let client = net.lock().peers[peer_id].client().as_client(); let justification = crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); @@ -539,7 +530,7 @@ fn transition_3_voters_twice_1_full_observer() { net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { - let full_client = peer.client().as_full().expect("only full clients are used in test"); + let full_client = peer.client().as_client(); assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); let set: AuthoritySet = @@ -614,7 +605,7 @@ fn transition_3_voters_twice_1_full_observer() { .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |_| future::ready(())) .map(move |()| { - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); @@ -835,7 +826,7 @@ fn force_change_to_new_set() { for (i, peer) in net.lock().peers().iter().enumerate() { assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); - let full_client = peer.client().as_full().expect("only full clients are used in test"); + let full_client = peer.client().as_client(); let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); @@ -861,7 +852,7 @@ fn allows_reimporting_change_blocks() { let client = net.peer(0).client().clone(); let (mut block_import, ..) = net.make_block_import(client.clone()); - let full_client = client.as_full().unwrap(); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); @@ -908,7 +899,7 @@ fn test_bad_justification() { let client = net.peer(0).client().clone(); let (mut block_import, ..) = net.make_block_import(client.clone()); - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); @@ -1148,7 +1139,7 @@ fn voter_persists_its_votes() { .await; let block_30_hash = - net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); + net.lock().peer(0).client().as_client().hash(30).unwrap().unwrap(); // we restart alice's voter abort.abort(); @@ -1581,7 +1572,7 @@ fn imports_justification_for_regular_blocks_on_import() { let client = net.peer(0).client().clone(); let (mut block_import, ..) 
= net.make_block_import(client.clone()); - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml deleted file mode 100644 index 1b83bf5be42f4..0000000000000 --- a/client/light/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -description = "components for a light client" -name = "sc-light" -version = "4.0.0-dev" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -documentation = "https://docs.rs/sc-light" -readme = "README.md" - -[dependencies] -parking_lot = "0.11.1" -hash-db = "0.15.2" -sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } -sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } -sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } -sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } -sc-client-api = { version = "4.0.0-dev", path = "../api" } -sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "2.0.0" } -sc-executor = { version = "0.10.0-dev", path = "../executor" } - -[features] -default = [] diff --git a/client/light/README.md b/client/light/README.md deleted file mode 100644 index 1ba1f155b1652..0000000000000 --- a/client/light/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Light client components. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs deleted file mode 100644 index 3091dce625a3f..0000000000000 --- a/client/light/src/backend.rs +++ /dev/null @@ -1,578 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client backend. Only stores headers and justifications of blocks. -//! Everything else is requested from full nodes on demand. 
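
As the module doc above says, the backend being deleted held real state only for the genesis block; every other block's state was represented by a marker that fails on access, which is what `state_at` further down implements. The control flow, reduced to a sketch with toy types (`LightBackend` and `GenesisOrUnavailable` are illustrative names mirroring the `Backend` and `GenesisOrUnavailableState` definitions below):

/// Toy mirror of `GenesisOrUnavailableState`: genesis state lives in memory,
/// every other state is known to exist but cannot be read locally.
enum GenesisOrUnavailable<S> {
	Genesis(S),
	Unavailable,
}

struct LightBackend<S> {
	genesis_state: Option<S>,
}

impl<S: Clone> LightBackend<S> {
	/// Mirrors `state_at`: only block 0 can ever yield usable state.
	fn state_at(&self, block_number: u64) -> GenesisOrUnavailable<S> {
		if block_number == 0 {
			if let Some(genesis) = self.genesis_state.clone() {
				return GenesisOrUnavailable::Genesis(genesis)
			}
		}
		GenesisOrUnavailable::Unavailable
	}
}

fn main() {
	let backend = LightBackend { genesis_state: Some(vec![(b"key".to_vec(), b"val".to_vec())]) };
	assert!(matches!(backend.state_at(0), GenesisOrUnavailable::Genesis(_)));
	assert!(matches!(backend.state_at(42), GenesisOrUnavailable::Unavailable));
}
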
- -use parking_lot::RwLock; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use codec::{Decode, Encode}; - -use super::blockchain::Blockchain; -use hash_db::Hasher; -use sc_client_api::{ - backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, - PrunableStateChangesTrieStorage, RemoteBackend, - }, - blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, - in_mem::check_genesis_storage, - light::Storage as BlockchainStorage, - UsageInfo, -}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_core::{ - offchain::storage::InMemOffchainStorage, - storage::{well_known_keys, ChildInfo}, - ChangesTrieConfiguration, -}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, - Justification, Justifications, Storage, -}; -use sp_state_machine::{ - Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, - IndexOperation, StorageCollection, TrieBackend, -}; - -const IN_MEMORY_EXPECT_PROOF: &str = - "InMemory state backend has Void error type and always succeeds; qed"; - -/// Light client backend. -pub struct Backend { - blockchain: Arc>, - genesis_state: RwLock>>, - import_lock: RwLock<()>, -} - -/// Light block (header and justification) import operation. -pub struct ImportOperation { - header: Option, - cache: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec>, - set_head: Option>, - storage_update: Option>>, - changes_trie_config_update: Option>, - _phantom: std::marker::PhantomData, -} - -/// Either in-memory genesis state, or locally-unavailable state. -pub enum GenesisOrUnavailableState { - /// Genesis state - storage values are stored in-memory. - Genesis(InMemoryBackend), - /// We know that state exists, but all calls will fail with error, because it - /// isn't locally available. - Unavailable, -} - -impl Backend { - /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } - } - - /// Get shared blockchain reference. 
- pub fn blockchain(&self) -> &Arc> { - &self.blockchain - } -} - -impl AuxStore for Backend { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - self.blockchain.storage().insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.blockchain.storage().get_aux(key) - } -} - -impl ClientBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type BlockImportOperation = ImportOperation; - type Blockchain = Blockchain; - type State = GenesisOrUnavailableState>; - type OffchainStorage = InMemOffchainStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(ImportOperation { - header: None, - cache: Default::default(), - leaf_state: NewBlockState::Normal, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - storage_update: None, - changes_trie_config_update: None, - _phantom: Default::default(), - }) - } - - fn begin_state_operation( - &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId, - ) -> ClientResult<()> { - Ok(()) - } - - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { - if !operation.finalized_blocks.is_empty() { - for block in operation.finalized_blocks { - self.blockchain.storage().finalize_header(block)?; - } - } - - if let Some(header) = operation.header { - let is_genesis_import = header.number().is_zero(); - if let Some(new_config) = operation.changes_trie_config_update { - operation - .cache - .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); - } - self.blockchain.storage().import_header( - header, - operation.cache, - operation.leaf_state, - operation.aux_ops, - )?; - - // when importing genesis block => remember its state - if is_genesis_import { - *self.genesis_state.write() = operation.storage_update.take(); - } - } else { - for (key, maybe_val) in operation.aux_ops { - match maybe_val { - Some(val) => self - .blockchain - .storage() - .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, - None => - self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, - } - } - } - - if let Some(set_head) = operation.set_head { - self.blockchain.storage().set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block( - &self, - block: BlockId, - _justification: Option, - ) -> ClientResult<()> { - self.blockchain.storage().finalize_header(block) - } - - fn append_justification( - &self, - _block: BlockId, - _justification: Justification, - ) -> ClientResult<()> { - Ok(()) - } - - fn blockchain(&self) -> &Blockchain { - &self.blockchain - } - - fn usage_info(&self) -> Option { - self.blockchain.storage().usage_info() - } - - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - None - } - - fn offchain_storage(&self) -> Option { - None - } - - fn state_at(&self, block: BlockId) -> ClientResult { - let block_number = self.blockchain.expect_block_number_from_id(&block)?; - - // special case for genesis block - if block_number.is_zero() { - if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)) - } - } - - // else return unavailable state. We do not return error here, because error - // would mean that we do not know this state at all. 
But we know that it exists - Ok(GenesisOrUnavailableState::Unavailable) - } - - fn revert( - &self, - _n: NumberFor, - _revert_finalized: bool, - ) -> ClientResult<(NumberFor, HashSet)> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn remove_leaf_block(&self, _hash: &Block::Hash) -> ClientResult<()> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } -} - -impl RemoteBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage + 'static, - Block::Hash: Ord, -{ - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() && - self.blockchain - .expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - self.blockchain.clone() - } -} - -impl BlockImportOperation for ImportOperation -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type State = GenesisOrUnavailableState>; - - fn state(&self) -> ClientResult> { - // None means 'locally-stateless' backend - Ok(None) - } - - fn set_block_data( - &mut self, - header: Block::Header, - _body: Option>, - _indexed_body: Option>>, - _justifications: Option, - state: NewBlockState, - ) -> ClientResult<()> { - self.leaf_state = state; - self.header = Some(header); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.cache = cache; - } - - fn update_db_storage( - &mut self, - _update: >>::Transaction, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn set_genesis_state(&mut self, input: Storage, commit: bool) -> ClientResult { - check_genesis_storage(&input)?; - - // changes trie configuration - let changes_trie_config = input - .top - .iter() - .find(|(k, _)| &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| { - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") - }); - self.changes_trie_config_update = Some(changes_trie_config); - - // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, _> = HashMap::new(); - storage.insert(None, input.top); - - // create a list of children keys to re-compute roots for - let child_delta = input - .children_default - .iter() - .map(|(_storage_key, storage_child)| (&storage_child.child_info, std::iter::empty())); - - // make sure to persist the child storage - for (_child_key, storage_child) in input.children_default.clone() { - storage.insert(Some(storage_child.child_info), storage_child.data); - } - - let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); - if commit { - self.storage_update = Some(storage_update); - } - - Ok(storage_root) - } - - fn reset_storage(&mut self, _input: Storage) -> ClientResult { - Err(ClientError::NotAvailableOnLightClient) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where - I: IntoIterator, Option>)>, - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn mark_finalized( - &mut self, - block: BlockId, - _justifications: Option, - ) -> ClientResult<()> { - self.finalized_blocks.push(block); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - self.set_head = Some(block); - Ok(()) - } - - fn update_transaction_index( - &mut self, - _index: Vec, - ) -> sp_blockchain::Result<()> { - // noop for the light client - Ok(()) - } -} - -impl std::fmt::Debug for GenesisOrUnavailableState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.fmt(f), - GenesisOrUnavailableState::Unavailable => write!(f, "Unavailable"), - } - } -} - -impl StateBackend for GenesisOrUnavailableState -where - H::Out: Ord + codec::Codec, -{ - type Error = ClientError; - type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_storage_key(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - 
Ok(state.next_child_storage_key(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_key_values_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - action: A, - allow_missing: bool, - ) -> ClientResult { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok(state - .apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) - .expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.apply_to_keys_while(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) - where - H::Out: Ord, - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), - GenesisOrUnavailableState::Unavailable => Default::default(), - } - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) - where - H::Out: Ord, - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta); - (root, is_equal, Default::default()) - }, - GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), - } - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) {} - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - sp_state_machine::UsageInfo::empty() - } - - fn as_trie_backend(&self) -> Option<&TrieBackend> { - match self { - GenesisOrUnavailableState::Genesis(ref state) => state.as_trie_backend(), - GenesisOrUnavailableState::Unavailable => None, - } - } -} diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs deleted file mode 100644 index 24d9ef4fd4b95..0000000000000 --- a/client/light/src/blockchain.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client blockchain backend. Only stores headers and justifications of recent -//! blocks. CHT roots are stored for headers of ancient blocks. - -use std::sync::Arc; - -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, - Justifications, -}; - -use sc_client_api::light::RemoteHeaderRequest; -pub use sc_client_api::{ - backend::{AuxStore, NewBlockState, ProvideChtRoots}, - blockchain::{ - well_known_cache_keys, Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - }, - cht, - light::{LocalOrRemote, RemoteBlockchain, Storage}, -}; -use sp_blockchain::{ - CachedHeaderMetadata, Error as ClientError, HeaderMetadata, Result as ClientResult, -}; - -/// Light client blockchain. -pub struct Blockchain { - storage: S, -} - -impl Blockchain { - /// Create new light blockchain backed with given storage. - pub fn new(storage: S) -> Self { - Self { storage } - } - - /// Get storage reference. - pub fn storage(&self) -> &S { - &self.storage - } -} - -impl BlockchainHeaderBackend for Blockchain -where - Block: BlockT, - S: Storage, -{ - fn header(&self, id: BlockId) -> ClientResult> { - match RemoteBlockchain::header(self, id)? 
{ - LocalOrRemote::Local(header) => Ok(Some(header)), - LocalOrRemote::Remote(_) => Err(ClientError::NotAvailableOnLightClient), - LocalOrRemote::Unknown => Ok(None), - } - } - - fn info(&self) -> BlockchainInfo { - self.storage.info() - } - - fn status(&self, id: BlockId) -> ClientResult { - self.storage.status(id) - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - self.storage.number(hash) - } - - fn hash( - &self, - number: <::Header as HeaderT>::Number, - ) -> ClientResult> { - self.storage.hash(number) - } -} - -impl HeaderMetadata for Blockchain -where - Block: BlockT, - S: Storage, -{ - type Error = ClientError; - - fn header_metadata( - &self, - hash: Block::Hash, - ) -> Result, Self::Error> { - self.storage.header_metadata(hash) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.storage.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.storage.remove_header_metadata(hash) - } -} - -impl BlockchainBackend for Blockchain -where - Block: BlockT, - S: Storage, -{ - fn body(&self, _id: BlockId) -> ClientResult>> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn justifications(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn last_finalized(&self) -> ClientResult { - self.storage.last_finalized() - } - - fn cache(&self) -> Option>> { - self.storage.cache() - } - - fn leaves(&self) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn indexed_transaction(&self, _hash: &Block::Hash) -> ClientResult>> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn block_indexed_body( - &self, - _id: BlockId, - ) -> sp_blockchain::Result>>> { - Err(ClientError::NotAvailableOnLightClient) - } -} - -impl, Block: BlockT> ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - self.storage.cache() - } -} - -impl RemoteBlockchain for Blockchain -where - S: Storage, -{ - fn header( - &self, - id: BlockId, - ) -> ClientResult>> { - // first, try to read header from local storage - if let Some(local_header) = self.storage.header(id)? { - return Ok(LocalOrRemote::Local(local_header)) - } - - // we need to know block number to check if it's a part of CHT - let number = match id { - BlockId::Hash(hash) => match self.storage.number(hash)? { - Some(number) => number, - None => return Ok(LocalOrRemote::Unknown), - }, - BlockId::Number(number) => number, - }; - - // if the header is genesis (never pruned), non-canonical, or from future => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown - { - return Ok(LocalOrRemote::Unknown) - } - - Ok(LocalOrRemote::Remote(RemoteHeaderRequest { - cht_root: match self.storage.header_cht_root(cht::size(), number)? 
{ - Some(cht_root) => cht_root, - None => return Ok(LocalOrRemote::Unknown), - }, - block: number, - retry_count: None, - })) - } -} - -impl, Block: BlockT> ProvideChtRoots for Blockchain { - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage().header_cht_root(cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage().changes_trie_cht_root(cht_size, block) - } -} diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs deleted file mode 100644 index a0776131e406d..0000000000000 --- a/client/light/src/call_executor.rs +++ /dev/null @@ -1,206 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Methods that light client could use to execute runtime calls. - -use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; - -use codec::{Decode, Encode}; -use hash_db::Hasher; -use sp_core::{ - convert_hash, - traits::{CodeExecutor, SpawnNamed}, - NativeOrEncoded, -}; -use sp_externalities::Extensions; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; -use sp_state_machine::{ - create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, - ExecutionStrategy, OverlayedChanges, StorageProof, -}; - -use sp_api::{ProofRecorder, StorageTransactionCache}; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -use sc_client_api::{ - backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, -}; -use sc_executor::RuntimeVersion; - -/// Call executor that is able to execute calls only on genesis state. -/// -/// Trying to execute call on non-genesis state leads to error. -pub struct GenesisCallExecutor { - backend: Arc, - local: L, -} - -impl GenesisCallExecutor { - /// Create new genesis call executor. 
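// The `CallExecutor` methods below (`call`, `contextual_call`,
// `prove_execution`, `runtime_version`) all apply the same gate: when the
// backend reports the requested block's state as locally available (for a
// light client that means genesis only), the call is delegated to the
// wrapped `local` executor; otherwise it fails fast with
// `ClientError::NotAvailableOnLightClient`.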
- pub fn new(backend: Arc, local: L) -> Self { - Self { backend, local } - } -} - -impl Clone for GenesisCallExecutor { - fn clone(&self) -> Self { - GenesisCallExecutor { backend: self.backend.clone(), local: self.local.clone() } - } -} - -impl CallExecutor for GenesisCallExecutor -where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, -{ - type Error = ClientError; - - type Backend = B; - - fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> ClientResult> { - if self.backend.is_local_state_available(id) { - self.local.call(id, method, call_data, strategy, extensions) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn contextual_call< - EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - _: Option<&RefCell>>, - _manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> ClientResult> - where - ExecutionManager: Clone, - { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - if self.backend.is_local_state_available(at) { - CallExecutor::contextual_call::< - fn( - Result, Local::Error>, - Result, Local::Error>, - ) -> Result, Local::Error>, - _, - NC, - >( - &self.local, - at, - method, - call_data, - changes, - None, - ExecutionManager::NativeWhenPossible, - native_call, - recorder, - extensions, - ) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn prove_execution( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - ) -> ClientResult<(Vec, StorageProof)> { - if self.backend.is_local_state_available(at) { - self.local.prove_execution(at, method, call_data) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - if self.backend.is_local_state_available(id) { - self.local.runtime_version(id) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } -} - -/// Check remote contextual execution proof using given backend. -/// -/// Proof should include the method execution proof. -pub fn check_execution_proof( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest
<Header>
, - remote_proof: StorageProof, -) -> ClientResult> -where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, -{ - let local_state_root = request.header.state_root(); - let root: H::Out = convert_hash(&local_state_root); - - // prepare execution environment - let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; - - // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code - .runtime_code() - .map_err(|_e| ClientError::RuntimeCodeMissing)?; - - // execute method - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle, - &request.method, - &request.call_data, - &runtime_code, - ) - .map_err(Into::into) -} diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs deleted file mode 100644 index 4b084cda0f8b1..0000000000000 --- a/client/light/src/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client components. - -use sp_runtime::traits::{Block as BlockT, HashFor}; -use std::sync::Arc; - -pub mod backend; -pub mod blockchain; -pub mod call_executor; - -pub use backend::*; -pub use blockchain::*; -pub use call_executor::*; - -use sc_client_api::light::Storage as BlockchainStorage; - -/// Create an instance of light client backend. -pub fn new_light_backend(blockchain: Arc>) -> Arc>> -where - B: BlockT, - S: BlockchainStorage, -{ - Arc::new(Backend::new(blockchain)) -} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 7b334175a2805..e2b950cf67e8c 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -20,14 +20,14 @@ use crate::{ bitswap::Bitswap, config::ProtocolId, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - light_client_requests, peer_info, + peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; use codec::Encode; -use futures::{channel::oneshot, stream::StreamExt}; +use futures::channel::oneshot; use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, identify::IdentifyInfo, @@ -76,10 +76,6 @@ pub struct Behaviour { #[behaviour(ignore)] events: VecDeque>, - /// Light client request handling. - #[behaviour(ignore)] - light_client_request_sender: light_client_requests::sender::LightClientRequestSender, - /// Protocol name used to send out block requests via /// [`request_responses::RequestResponsesBehaviour`]. 
#[behaviour(ignore)] @@ -198,7 +194,6 @@ impl Behaviour { substrate: Protocol, user_agent: String, local_public_key: PublicKey, - light_client_request_sender: light_client_requests::sender::LightClientRequestSender, disco_config: DiscoveryConfig, block_request_protocol_config: request_responses::ProtocolConfig, state_request_protocol_config: request_responses::ProtocolConfig, @@ -233,7 +228,6 @@ impl Behaviour { request_response_protocols.into_iter(), peerset, )?, - light_client_request_sender, events: VecDeque::new(), block_request_protocol_name, state_request_protocol_name, @@ -316,14 +310,6 @@ impl Behaviour { pub fn put_value(&mut self, key: record::Key, value: Vec) { self.discovery.put_value(key, value); } - - /// Issue a light client request. - pub fn light_client_request( - &mut self, - r: light_client_requests::sender::Request, - ) -> Result<(), light_client_requests::sender::SendRequestError> { - self.light_client_request_sender.request(r) - } } fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { @@ -436,17 +422,11 @@ impl NetworkBehaviourEventProcess> for Behavi CustomMessageOutcome::NotificationsReceived { remote, messages } => { self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, - CustomMessageOutcome::PeerNewBest(peer_id, number) => { - self.light_client_request_sender.update_best_block(&peer_id, number); - }, - CustomMessageOutcome::SyncConnected(peer_id) => { - self.light_client_request_sender.inject_connected(peer_id); - self.events.push_back(BehaviourOut::SyncConnected(peer_id)) - }, - CustomMessageOutcome::SyncDisconnected(peer_id) => { - self.light_client_request_sender.inject_disconnected(peer_id); - self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) - }, + CustomMessageOutcome::PeerNewBest(_peer_id, _number) => {}, + CustomMessageOutcome::SyncConnected(peer_id) => + self.events.push_back(BehaviourOut::SyncConnected(peer_id)), + CustomMessageOutcome::SyncDisconnected(peer_id) => + self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)), CustomMessageOutcome::None => {}, } } @@ -534,23 +514,9 @@ impl NetworkBehaviourEventProcess for Behaviour { impl Behaviour { fn poll( &mut self, - cx: &mut Context, + _cx: &mut Context, _: &mut impl PollParameters, ) -> Poll>> { - use light_client_requests::sender::OutEvent; - while let Poll::Ready(Some(event)) = self.light_client_request_sender.poll_next_unpin(cx) { - match event { - OutEvent::SendRequest { target, request, pending_response, protocol_name } => - self.request_responses.send_request( - &target, - &protocol_name, - request, - pending_response, - IfDisconnected::ImmediateError, - ), - } - } - if let Some(event) = self.events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 76c806ccbf7b6..8ef52e46fd071 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,7 +23,6 @@ pub use crate::{ chain::Client, - on_demand_layer::{AlwaysBadChecker, OnDemand}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -83,11 +82,6 @@ pub struct Params { /// Client that contains the blockchain. pub chain: Arc>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. - /// If `Some`, the network worker will process these requests and answer them. - /// Normally used only for light clients. - pub on_demand: Option>>, - /// Pool of transactions. 
/// /// The network worker will fetch transactions from this object in order to propagate them on diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 2f81ddfa1fb13..cb16eb163ee46 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -247,7 +247,6 @@ mod behaviour; mod chain; mod discovery; -mod on_demand_layer; mod peer_info; mod protocol; mod request_responses; diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index e18b783f219be..b1793ce9384ed 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -20,8 +20,6 @@ /// For incoming light client requests. pub mod handler; -/// For outgoing light client requests. -pub mod sender; use crate::{config::ProtocolId, request_responses::ProtocolConfig}; @@ -47,269 +45,3 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { inbound_queue: None, } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::{config::ProtocolId, request_responses::IncomingRequest}; - - use assert_matches::assert_matches; - use futures::{ - channel::oneshot, - executor::{block_on, LocalPool}, - prelude::*, - task::Spawn, - }; - use libp2p::PeerId; - use sc_client_api::{ - light::{ - self, ChangesProof, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadRequest, - }, - FetchChecker, RemoteReadChildRequest, StorageProof, - }; - use sp_blockchain::Error as ClientError; - use sp_core::storage::ChildInfo; - use sp_runtime::{ - generic::Header, - traits::{BlakeTwo256, Block as BlockT, NumberFor}, - }; - use std::{collections::HashMap, sync::Arc}; - - pub struct DummyFetchChecker { - pub ok: bool, - pub _mark: std::marker::PhantomData, - } - - impl FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - header: Option, - _remote_proof: StorageProof, - ) -> Result { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &RemoteCallRequest, - _: StorageProof, - ) -> Result, ClientError> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &RemoteChangesRequest, - _: ChangesProof, - ) -> Result, u32)>, ClientError> { - match self.ok { - true => Ok(vec![(100u32.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &RemoteBodyRequest, - body: Vec, - ) -> Result, ClientError> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - pub fn protocol_id() -> ProtocolId { - ProtocolId::from("test") - } - - pub fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { - let cfg = 
sc_peerset::SetConfig { - in_peers: 128, - out_peers: 128, - bootnodes: Default::default(), - reserved_only: false, - reserved_nodes: Default::default(), - }; - sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets: vec![cfg] }) - } - - pub fn dummy_header() -> sp_test_primitives::Header { - sp_test_primitives::Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - type Block = - sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - - fn send_receive(request: sender::Request, pool: &LocalPool) { - let client = Arc::new(substrate_test_runtime_client::new()); - let (handler, protocol_config) = - handler::LightClientRequestHandler::new(&protocol_id(), client); - pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = sender::LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - sender.inject_connected(PeerId::random()); - - sender.request(request).unwrap(); - let sender::OutEvent::SendRequest { pending_response, request, .. } = - block_on(sender.next()).unwrap(); - let (tx, rx) = oneshot::channel(); - block_on(protocol_config.inbound_queue.unwrap().send(IncomingRequest { - peer: PeerId::random(), - payload: request, - pending_response: tx, - })) - .unwrap(); - pool.spawner() - .spawn_obj( - async move { - pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); - } - .boxed() - .into(), - ) - .unwrap(); - - pool.spawner() - .spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()) - .unwrap(); - } - - #[test] - fn send_receive_call() { - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - - let mut pool = LocalPool::new(); - send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); - assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_execution_proof` - } - - #[test] - fn send_receive_read() { - let chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); - assert_eq!( - Some(vec![42]), - pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() - ); - // ^--- from `DummyFetchChecker::check_read_proof` - } - - #[test] - fn send_receive_read_child() { - let chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); - assert_eq!( - Some(vec![42]), - pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() - ); - // ^--- from `DummyFetchChecker::check_read_child_proof` - } - - #[test] - fn send_receive_header() { - sp_tracing::try_init_simple(); - let chan = oneshot::channel(); - 
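// Round trip under test: the sender serializes a header request for block 1,
// the handler answers from a fresh client that does not know that block, and
// after the checker rejects the empty answer the request resolves to
// `RemoteFetchFailed` below.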
let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); - // The remote does not know block 1: - assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); - } - - #[test] - fn send_receive_changes() { - let chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); - assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_changes_proof` - } -} diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs index 43504edddd73a..a04c5e310a67e 100644 --- a/client/network/src/light_client_requests/handler.rs +++ b/client/network/src/light_client_requests/handler.rs @@ -32,17 +32,14 @@ use codec::{self, Decode, Encode}; use futures::{channel::mpsc, prelude::*}; use log::{debug, trace}; use prost::Message; -use sc_client_api::{light, StorageProof}; +use sc_client_api::StorageProof; use sc_peerset::ReputationChange; use sp_core::{ hexdisplay::HexDisplay, - storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageKey}, + storage::{ChildInfo, ChildType, PrefixedStorageKey}, }; -use sp_runtime::{ - generic::BlockId, - traits::{Block, Zero}, -}; -use std::{collections::BTreeMap, sync::Arc}; +use sp_runtime::{generic::BlockId, traits::Block}; +use std::sync::Arc; const LOG_TARGET: &str = "light-client-request-handler"; @@ -137,12 +134,12 @@ impl LightClientRequestHandler { self.on_remote_call_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => self.on_remote_read_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteHeaderRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteChangesRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), None => return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; @@ -285,106 +282,6 @@ impl LightClientRequestHandler { Ok(schema::v1::light::Response { response: Some(response) }) } - - fn on_remote_header_request( - &mut self, - peer: &PeerId, - request: &schema::v1::light::RemoteHeaderRequest, - ) -> Result { - trace!("Remote header proof request from {} ({:?}).", peer, request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - let (header, proof) = match self.client.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), - Err(error) => { - trace!( 
- "Remote header proof request from {} ({:?}) failed with: {}.", - peer, - request.block, - error - ); - (Default::default(), StorageProof::empty()) - }, - }; - - let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; - schema::v1::light::response::Response::RemoteHeaderResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_changes_request( - &mut self, - peer: &PeerId, - request: &schema::v1::light::RemoteChangesRequest, - ) -> Result { - trace!( - "Remote changes proof request from {} for key {} ({:?}..{:?}).", - peer, - if !request.storage_key.is_empty() { - format!( - "{} : {}", - HexDisplay::from(&request.storage_key), - HexDisplay::from(&request.key) - ) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last, - ); - - let first = Decode::decode(&mut request.first.as_ref())?; - let last = Decode::decode(&mut request.last.as_ref())?; - let min = Decode::decode(&mut request.min.as_ref())?; - let max = Decode::decode(&mut request.max.as_ref())?; - let key = StorageKey(request.key.clone()); - let storage_key = if request.storage_key.is_empty() { - None - } else { - Some(PrefixedStorageKey::new_ref(&request.storage_key)) - }; - - let proof = - match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { - Ok(proof) => proof, - Err(error) => { - trace!( - "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", - peer, - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), - request.first, - request.last, - error, - ); - - light::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - }, - }; - - let response = { - let r = schema::v1::light::RemoteChangesResponse { - max: proof.max_block.encode(), - proof: proof.proof, - roots: proof - .roots - .into_iter() - .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) - .collect(), - roots_proof: proof.roots_proof.encode(), - }; - schema::v1::light::response::Response::RemoteChangesResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } } #[derive(derive_more::Display, derive_more::From)] diff --git a/client/network/src/light_client_requests/sender.rs b/client/network/src/light_client_requests/sender.rs deleted file mode 100644 index 284db827594b4..0000000000000 --- a/client/network/src/light_client_requests/sender.rs +++ /dev/null @@ -1,1294 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Helper for outgoing light client requests. -//! -//! Call [`LightClientRequestSender::request`](sender::LightClientRequestSender::request) -//! to send out light client requests. 
It will: -//! -//! 1. Build the request. -//! -//! 2. Forward the request to [`crate::request_responses::RequestResponsesBehaviour`] via -//! [`OutEvent::SendRequest`](sender::OutEvent::SendRequest). -//! -//! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`] -//! provided earlier with [`LightClientRequestSender::request`](sender::LightClientRequestSender:: -//! request). - -use crate::{ - config::ProtocolId, - protocol::message::BlockAttributes, - request_responses::{OutboundFailure, RequestFailure}, - schema, PeerId, -}; -use codec::{self, Decode, Encode}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use prost::Message; -use sc_client_api::light::{self, RemoteBodyRequest}; -use sc_peerset::ReputationChange; -use sp_blockchain::Error as ClientError; -use sp_runtime::traits::{Block, Header, NumberFor}; -use std::{ - collections::{BTreeMap, HashMap, VecDeque}, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -mod rep { - use super::*; - - /// Reputation change for a peer when a request timed out. - pub const TIMEOUT: ReputationChange = - ReputationChange::new(-(1 << 8), "light client request timeout"); - /// Reputation change for a peer when a request is refused. - pub const REFUSED: ReputationChange = - ReputationChange::new(-(1 << 8), "light client request refused"); -} - -/// Configuration options for [`LightClientRequestSender`]. -#[derive(Debug, Clone)] -struct Config { - max_pending_requests: usize, - light_protocol: String, - block_protocol: String, -} - -impl Config { - /// Create a new [`LightClientRequestSender`] configuration. - pub fn new(id: &ProtocolId) -> Self { - Self { - max_pending_requests: 128, - light_protocol: super::generate_protocol_name(id), - block_protocol: crate::block_request_handler::generate_protocol_name(id), - } - } -} - -/// State machine helping to send out light client requests. -pub struct LightClientRequestSender { - /// This behaviour's configuration. - config: Config, - /// Verifies that received responses are correct. - checker: Arc>, - /// Peer information (addresses, their best block, etc.) - peers: HashMap>, - /// Pending (local) requests. - pending_requests: VecDeque>, - /// Requests on their way to remote peers. - sent_requests: FuturesUnordered< - BoxFuture< - 'static, - (SentRequest, Result, RequestFailure>, oneshot::Canceled>), - >, - >, - /// Handle to use for reporting misbehaviour of peers. - peerset: sc_peerset::PeersetHandle, -} - -/// Augments a pending light client request with metadata. -#[derive(Debug)] -struct PendingRequest { - /// Remaining attempts. - attempts_left: usize, - /// The actual request. - request: Request, -} - -impl PendingRequest { - fn new(req: Request) -> Self { - Self { - // Number of retries + one for the initial attempt. - attempts_left: req.retries() + 1, - request: req, - } - } - - fn into_sent(self, peer_id: PeerId) -> SentRequest { - SentRequest { attempts_left: self.attempts_left, request: self.request, peer: peer_id } - } -} - -/// Augments a light client request with metadata that is currently being send to a remote. -#[derive(Debug)] -struct SentRequest { - /// Remaining attempts. - attempts_left: usize, - /// The actual request. - request: Request, - /// The peer that the request is send to. 
- peer: PeerId, -} - -impl SentRequest { - fn into_pending(self) -> PendingRequest { - PendingRequest { attempts_left: self.attempts_left, request: self.request } - } -} - -impl Unpin for LightClientRequestSender {} - -impl LightClientRequestSender -where - B: Block, -{ - /// Construct a new light client handler. - pub fn new( - id: &ProtocolId, - checker: Arc>, - peerset: sc_peerset::PeersetHandle, - ) -> Self { - Self { - config: Config::new(id), - checker, - peers: Default::default(), - pending_requests: Default::default(), - sent_requests: Default::default(), - peerset, - } - } - - /// We rely on external information about peers best blocks as we lack the - /// means to determine it ourselves. - pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { - if let Some(info) = self.peers.get_mut(peer) { - log::trace!("new best block for {:?}: {:?}", peer, num); - info.best_block = Some(num) - } - } - - /// Issue a new light client request. - pub fn request(&mut self, req: Request) -> Result<(), SendRequestError> { - if self.pending_requests.len() >= self.config.max_pending_requests { - return Err(SendRequestError::TooManyRequests) - } - self.pending_requests.push_back(PendingRequest::new(req)); - Ok(()) - } - - /// Remove the given peer. - /// - /// In-flight requests to the given peer might fail and be retried. See - /// [`::poll_next`]. - fn remove_peer(&mut self, peer: PeerId) { - self.peers.remove(&peer); - } - - /// Process a local request's response from remote. - /// - /// If successful, this will give us the actual, checked data we should be - /// sending back to the client, otherwise an error. - fn on_response( - &mut self, - peer: PeerId, - request: &Request, - response: Response, - ) -> Result, Error> { - log::trace!("response from {}", peer); - match response { - Response::Light(r) => self.on_response_light(request, r), - Response::Block(r) => self.on_response_block(request, r), - } - } - - fn on_response_light( - &mut self, - request: &Request, - response: schema::v1::light::Response, - ) -> Result, Error> { - use schema::v1::light::response::Response; - match response.response { - Some(Response::RemoteCallResponse(response)) => { - if let Request::Call { request, .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_execution_proof(request, proof)?; - Ok(Reply::VecU8(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - Some(Response::RemoteReadResponse(response)) => match request { - Request::Read { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - }, - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - }, - _ => Err(Error::UnexpectedResponse), - }, - Some(Response::RemoteChangesResponse(response)) => { - if let Request::Changes { request, .. 
} = request { - let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; - let roots = { - let mut r = BTreeMap::new(); - for pair in response.roots { - let k = Decode::decode(&mut pair.fst.as_ref())?; - let v = Decode::decode(&mut pair.snd.as_ref())?; - r.insert(k, v); - } - r - }; - let reply = self.checker.check_changes_proof( - &request, - light::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - }, - )?; - Ok(Reply::VecNumberU32(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - Some(Response::RemoteHeaderResponse(response)) => { - if let Request::Header { request, .. } = request { - let header = if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) - }; - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_header_proof(&request, header, proof)?; - Ok(Reply::Header(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - None => Err(Error::UnexpectedResponse), - } - } - - fn on_response_block( - &mut self, - request: &Request, - response: schema::v1::BlockResponse, - ) -> Result, Error> { - let request = if let Request::Body { request, .. } = &request { - request - } else { - return Err(Error::UnexpectedResponse) - }; - - let body: Vec<_> = match response.blocks.into_iter().next() { - Some(b) => b.body, - None => return Err(Error::UnexpectedResponse), - }; - - let body = body - .into_iter() - .map(|extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) - .collect::>()?; - - let body = self.checker.check_body_proof(&request, body)?; - Ok(Reply::Extrinsics(body)) - } - - /// Signal that the node is connected to the given peer. - pub fn inject_connected(&mut self, peer: PeerId) { - let prev_entry = self.peers.insert(peer, Default::default()); - debug_assert!( - prev_entry.is_none(), - "Expect `inject_connected` to be called for disconnected peer.", - ); - } - - /// Signal that the node disconnected from the given peer. - pub fn inject_disconnected(&mut self, peer: PeerId) { - self.remove_peer(peer) - } -} - -impl Stream for LightClientRequestSender { - type Item = OutEvent; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // If we have received responses to previously sent requests, check them and pass them on. - while let Poll::Ready(Some((sent_request, request_result))) = - self.sent_requests.poll_next_unpin(cx) - { - if let Some(info) = self.peers.get_mut(&sent_request.peer) { - if info.status != PeerStatus::Busy { - // If we get here, something is wrong with our internal handling of peer status - // information. At any time, a single peer processes at most one request from - // us. A malicious peer should not be able to get us here. It is our own fault - // and must be fixed! - panic!("unexpected peer status {:?} for {}", info.status, sent_request.peer); - } - - info.status = PeerStatus::Idle; // Make peer available again. 
- } - - let request_result = match request_result { - Ok(r) => r, - Err(oneshot::Canceled) => { - log::debug!("Oneshot for request to peer {} was canceled.", sent_request.peer); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("no response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - continue - }, - }; - - let decoded_request_result = request_result.map(|response| { - if sent_request.request.is_block_request() { - schema::v1::BlockResponse::decode(&response[..]).map(|r| Response::Block(r)) - } else { - schema::v1::light::Response::decode(&response[..]).map(|r| Response::Light(r)) - } - }); - - let response = match decoded_request_result { - Ok(Ok(response)) => response, - Ok(Err(e)) => { - log::debug!( - "Failed to decode response from peer {}: {:?}.", - sent_request.peer, - e - ); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("invalid response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - continue - }, - Err(e) => { - log::debug!("Request to peer {} failed with {:?}.", sent_request.peer, e); - - match e { - RequestFailure::NotConnected => { - self.remove_peer(sent_request.peer); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::UnknownProtocol => { - debug_assert!( - false, - "Light client and block request protocol should be known when \ - sending requests.", - ); - }, - RequestFailure::Refused => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, rep::REFUSED); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Obsolete => { - debug_assert!( - false, - "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::Timeout) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, rep::TIMEOUT); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal( - "peer does not support light client or block request protocol", - ), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::DialFailure) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("failed to dial peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::ConnectionClosed) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("connection to peer closed"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - } - - continue - }, - }; - - match self.on_response(sent_request.peer, &sent_request.request, response) { - Ok(reply) => sent_request.request.return_reply(Ok(reply)), - Err(Error::UnexpectedResponse) => { - log::debug!("Unexpected response from peer {}.", sent_request.peer); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("unexpected response from peer"), - ); - 
self.pending_requests.push_back(sent_request.into_pending()); - }, - Err(other) => { - log::debug!( - "error handling response from peer {}: {}", - sent_request.peer, - other - ); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("invalid response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()) - }, - } - } - - // If we have a pending request to send, try to find an available peer and send it. - while let Some(mut pending_request) = self.pending_requests.pop_front() { - if pending_request.attempts_left == 0 { - pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); - continue - } - - let protocol = if pending_request.request.is_block_request() { - self.config.block_protocol.clone() - } else { - self.config.light_protocol.clone() - }; - - // Out of all idle peers, find one who's best block is high enough, choose any idle peer - // if none exists. - let mut peer = None; - for (peer_id, peer_info) in self.peers.iter_mut() { - if peer_info.status == PeerStatus::Idle { - match peer_info.best_block { - Some(n) if n >= pending_request.request.required_block() => { - peer = Some((*peer_id, peer_info)); - break - }, - _ => peer = Some((*peer_id, peer_info)), - } - } - } - - // Break in case there is no idle peer. - let (peer_id, peer_info) = match peer { - Some((peer_id, peer_info)) => (peer_id, peer_info), - None => { - self.pending_requests.push_front(pending_request); - log::debug!("No peer available to send request to."); - - break - }, - }; - - let request_bytes = match pending_request.request.serialize_request() { - Ok(bytes) => bytes, - Err(error) => { - log::debug!("failed to serialize request: {}", error); - pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); - continue - }, - }; - - let (tx, rx) = oneshot::channel(); - - peer_info.status = PeerStatus::Busy; - - pending_request.attempts_left -= 1; - - self.sent_requests - .push(async move { (pending_request.into_sent(peer_id), rx.await) }.boxed()); - - return Poll::Ready(Some(OutEvent::SendRequest { - target: peer_id, - request: request_bytes, - pending_response: tx, - protocol_name: protocol, - })) - } - - Poll::Pending - } -} - -/// Events returned by [`LightClientRequestSender`]. -#[derive(Debug)] -pub enum OutEvent { - /// Emit a request to be send out on the network e.g. via [`crate::request_responses`]. - SendRequest { - /// The remote peer to send the request to. - target: PeerId, - /// The encoded request. - request: Vec, - /// The [`oneshot::Sender`] channel to pass the response to. - pending_response: oneshot::Sender, RequestFailure>>, - /// The name of the protocol to use to send the request. - protocol_name: String, - }, -} - -/// Incoming response from remote. -#[derive(Debug, Clone)] -pub enum Response { - /// Incoming light response from remote. - Light(schema::v1::light::Response), - /// Incoming block response from remote. - Block(schema::v1::BlockResponse), -} - -/// Error returned by [`LightClientRequestSender::request`]. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum SendRequestError { - /// There are currently too many pending request. - #[display(fmt = "too many pending requests")] - TooManyRequests, -} - -/// Error type to propagate errors internally. -#[derive(Debug, derive_more::Display, derive_more::From)] -enum Error { - /// The response type does not correspond to the issued request. 
- #[display(fmt = "unexpected response")] - UnexpectedResponse, - /// Encoding or decoding of some data failed. - #[display(fmt = "codec error: {}", _0)] - Codec(codec::Error), - /// The chain client errored. - #[display(fmt = "client error: {}", _0)] - Client(ClientError), -} - -/// The data to send back to the light client over the oneshot channel. -// It is unified here in order to be able to return it as a function -// result instead of delivering it to the client as a side effect of -// response processing. -#[derive(Debug)] -enum Reply { - VecU8(Vec), - VecNumberU32(Vec<(::Number, u32)>), - MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header), - Extrinsics(Vec), -} - -/// Information we have about some peer. -#[derive(Debug)] -struct PeerInfo { - best_block: Option>, - status: PeerStatus, -} - -impl Default for PeerInfo { - fn default() -> Self { - PeerInfo { best_block: None, status: PeerStatus::Idle } - } -} - -/// A peer is either idle or busy processing a request from us. -#[derive(Debug, Clone, PartialEq, Eq)] -enum PeerStatus { - /// The peer is available. - Idle, - /// We wait for the peer to return us a response for the given request ID. - Busy, -} - -/// The possible light client requests we support. -/// -/// The associated `oneshot::Sender` will be used to convey the result of -/// their request back to them (cf. `Reply`). -// This is modeled after light_dispatch.rs's `RequestData` which is not -// used because we currently only support a subset of those. -#[derive(Debug)] -pub enum Request { - /// Remote body request. - Body { - /// Request. - request: RemoteBodyRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>>, - }, - /// Remote header request. - Header { - /// Request. - request: light::RemoteHeaderRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender>, - }, - /// Remote read request. - Read { - /// Request. - request: light::RemoteReadRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>>, - }, - /// Remote read child request. - ReadChild { - /// Request. - request: light::RemoteReadChildRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>>, - }, - /// Remote call request. - Call { - /// Request. - request: light::RemoteCallRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>>, - }, - /// Remote changes request. - Changes { - /// Request. - request: light::RemoteChangesRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, u32)>, ClientError>>, - }, -} - -impl Request { - fn is_block_request(&self) -> bool { - matches!(self, Request::Body { .. }) - } - - fn required_block(&self) -> NumberFor { - match self { - Request::Body { request, .. } => *request.header.number(), - Request::Header { request, .. } => request.block, - Request::Read { request, .. } => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), - Request::Call { request, .. } => *request.header.number(), - Request::Changes { request, .. } => request.max_block.0, - } - } - - fn retries(&self) -> usize { - let rc = match self { - Request::Body { request, .. } => request.retry_count, - Request::Header { request, .. } => request.retry_count, - Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, - Request::Call { request, .. 
} => request.retry_count, - Request::Changes { request, .. } => request.retry_count, - }; - rc.unwrap_or(0) - } - - fn serialize_request(&self) -> Result, prost::EncodeError> { - let request = match self { - Request::Body { request, .. } => { - let rq = schema::v1::BlockRequest { - fields: BlockAttributes::BODY.to_be_u32(), - from_block: Some(schema::v1::block_request::FromBlock::Hash( - request.header.hash().encode(), - )), - to_block: Default::default(), - direction: schema::v1::Direction::Ascending as i32, - max_blocks: 1, - support_multiple_justifications: true, - }; - - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - return Ok(buf) - }, - Request::Header { request, .. } => { - let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; - schema::v1::light::request::Request::RemoteHeaderRequest(r) - }, - Request::Read { request, .. } => { - let r = schema::v1::light::RemoteReadRequest { - block: request.block.encode(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadRequest(r) - }, - Request::ReadChild { request, .. } => { - let r = schema::v1::light::RemoteReadChildRequest { - block: request.block.encode(), - storage_key: request.storage_key.clone().into_inner(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadChildRequest(r) - }, - Request::Call { request, .. } => { - let r = schema::v1::light::RemoteCallRequest { - block: request.block.encode(), - method: request.method.clone(), - data: request.call_data.clone(), - }; - schema::v1::light::request::Request::RemoteCallRequest(r) - }, - Request::Changes { request, .. } => { - let r = schema::v1::light::RemoteChangesRequest { - first: request.first_block.1.encode(), - last: request.last_block.1.encode(), - min: request.tries_roots.1.encode(), - max: request.max_block.1.encode(), - storage_key: request - .storage_key - .clone() - .map(|s| s.into_inner()) - .unwrap_or_default(), - key: request.key.clone(), - }; - schema::v1::light::request::Request::RemoteChangesRequest(r) - }, - }; - - let rq = schema::v1::light::Request { request: Some(request) }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - Ok(buf) - } - - fn return_reply(self, result: Result, ClientError>) { - fn send(item: T, sender: oneshot::Sender) { - let _ = sender.send(item); // It is okay if the other end already hung up. 
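Ignoring the result of `send` is deliberate: with `futures::channel::oneshot`, `Sender::send` only fails when the receiving half has been dropped, i.e. the requester stopped waiting for an answer, which is not an error on our side. A runnable sketch of the same pattern, assuming only the `futures` crate:

use futures::channel::oneshot;

fn reply(sender: oneshot::Sender<u32>) {
    // `send` returns `Err(value)` if the receiver is gone; that is not an
    // error condition for the responder, so the result is discarded.
    let _ = sender.send(42);
}

fn main() {
    let (tx, rx) = oneshot::channel();
    drop(rx);  // The requester hangs up before we answer...
    reply(tx); // ...and the reply is silently dropped instead of panicking.
}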
- } - match self { - Request::Body { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - }, - Request::Header { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for header request: {:?}, {:?}", reply, request) - }, - }, - Request::Read { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - }, - Request::ReadChild { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for read child request: {:?}, {:?}", reply, request) - }, - }, - Request::Call { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - }, - Request::Changes { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for changes request: {:?}, {:?}", reply, request) - }, - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - light_client_requests::tests::{dummy_header, peerset, protocol_id, DummyFetchChecker}, - request_responses::OutboundFailure, - }; - - use assert_matches::assert_matches; - use futures::{channel::oneshot, executor::block_on, poll}; - use sc_client_api::StorageProof; - use sp_core::storage::ChildInfo; - use sp_runtime::{generic::Header, traits::BlakeTwo256}; - use std::{collections::HashSet, iter::FromIterator}; - - fn empty_proof() -> Vec { - StorageProof::empty().encode() - } - - #[test] - fn removes_peer_if_told() { - let peer = PeerId::random(); - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(DummyFetchChecker { ok: true, _mark: std::marker::PhantomData }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len()); - - sender.inject_disconnected(peer); - assert_eq!(0, sender.peers.len()); - } - - type Block = - sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - - #[test] - fn body_request_fields_encoded_properly() { - let (sender, _receiver) = oneshot::channel(); - let request = Request::::Body { - request: RemoteBodyRequest { header: dummy_header(), retry_count: None }, - sender, - }; - let serialized_request = request.serialize_request().unwrap(); - let deserialized_request = - schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); - assert!(BlockAttributes::from_be_u32(deserialized_request.fields) - .unwrap() - .contains(BlockAttributes::BODY)); - } - - #[test] - fn disconnects_from_peer_if_request_times_out() { - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer0); - sender.inject_connected(peer1); - - assert_eq!( - 
HashSet::from_iter(&[peer0.clone(), peer1.clone()]), - sender.peers.keys().collect::>(), - "Expect knowledge of two peers." - ); - - assert!(sender.pending_requests.is_empty(), "Expect no pending request."); - assert!(sender.sent_requests.is_empty(), "Expect no sent request."); - - // Issue a request! - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); - - let OutEvent::SendRequest { target, pending_response, .. } = - block_on(sender.next()).unwrap(); - assert!(target == peer0 || target == peer1, "Expect request to originate from known peer."); - - // And we should have one busy peer. - assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = - sender.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); - idle.len() == 1 && - busy.len() == 1 && (idle[0].0 == &peer0 || busy[0].0 == &peer0) && - (idle[0].0 == &peer1 || busy[0].0 == &peer1) - }); - - assert_eq!(0, sender.pending_requests.len(), "Expect no pending request."); - assert_eq!(1, sender.sent_requests.len(), "Expect one request to be sent."); - - // Report first attempt as timed out. - pending_response - .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) - .unwrap(); - - // Expect a new request to be issued. - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - - assert_eq!(1, sender.peers.len(), "Expect peer to be removed."); - assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); - assert_eq!(1, sender.sent_requests.len(), "Expect new request to be issued."); - - // Report second attempt as timed out. - pending_response - .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) - .unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt.", - ); - assert_matches!( - block_on(chan.1).unwrap(), - Err(ClientError::RemoteFetchFailed), - "Expect request failure to be reported.", - ); - assert_eq!(0, sender.peers.len(), "Expect no peer to be left"); - assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); - assert_eq!(0, sender.sent_requests.len(), "Expect no other request to be in progress."); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let peer = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: false, - // ^--- Making sure the response data check fails. - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); - assert_eq!(0, sender.sent_requests.len(), "Expect zero sent requests."); - - let OutEvent::SendRequest { pending_response, .. 
} = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - pending_response.send(Ok(response)).unwrap(); - - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert!(sender.peers.is_empty(), "Expect no peers to be left."); - assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); - assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let peer = PeerId::random(); - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - pending_response.send(Ok(response)).unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert!(sender.peers.is_empty(), "Expect no peers to be left."); - assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); - assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let peers = (0..4).map(|_| PeerId::random()).collect::>(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: false, - // ^--- Making sure the response data check fails. 
- _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - for peer in &peers { - sender.inject_connected(*peer); - } - assert_eq!(4, sender.peers.len(), "Expect four peers."); - - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(3), // Attempt up to three retries. - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let mut pending_response = match block_on(sender.next()).unwrap() { - OutEvent::SendRequest { pending_response, .. } => Some(pending_response), - }; - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - for (i, _peer) in peers.iter().enumerate() { - // Construct an invalid response - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - pending_response.take().unwrap().send(Ok(response)).unwrap(); - - if i < 3 { - pending_response = match block_on(sender.next()).unwrap() { - OutEvent::SendRequest { pending_response, .. } => Some(pending_response), - }; - assert_matches!(chan.1.try_recv(), Ok(None)) - } else { - // Last peer and last attempt. - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) - } - } - } - - fn issue_request(request: Request) { - let peer = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let response = match request { - Request::Body { .. } => unimplemented!(), - Request::Header { .. } => { - let r = schema::v1::light::RemoteHeaderResponse { - header: dummy_header().encode(), - proof: empty_proof(), - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), - } - }, - Request::Read { .. } => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }, - Request::ReadChild { .. } => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }, - Request::Call { .. } => { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }, - Request::Changes { .. 
} => { - let r = schema::v1::light::RemoteChangesResponse { - max: std::iter::repeat(1).take(32).collect(), - proof: Vec::new(), - roots: Vec::new(), - roots_proof: empty_proof(), - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), - } - }, - }; - - let response = { - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - sender.request(request).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len()); - assert_eq!(1, sender.sent_requests.len()); - - pending_response.send(Ok(response)).unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert_eq!(0, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()) - } - - #[test] - fn receives_remote_call_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - issue_request(Request::Call { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::Read { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_child_response() { - let mut chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::ReadChild { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_header_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - issue_request(Request::Header { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_changes_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - issue_request(Request::Changes { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } -} diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs deleted file mode 100644 index eaeb0bee98f2c..0000000000000 --- a/client/network/src/on_demand_layer.rs +++ /dev/null @@ -1,241 +0,0 @@ -// This file is part of 
Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! On-demand requests service. - -use crate::light_client_requests; - -use futures::{channel::oneshot, prelude::*}; -use parking_lot::Mutex; -use sc_client_api::{ - ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, - RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, - StorageProof, -}; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::Error as ClientError; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{ - collections::HashMap, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -/// Implements the `Fetcher` trait of the client. Makes it possible for the light client to perform -/// network requests for some state. -/// -/// This implementation stores all the requests in a queue. The network, in parallel, is then -/// responsible for pulling elements out of that queue and fulfilling them. -pub struct OnDemand { - /// Objects that checks whether what has been retrieved is correct. - checker: Arc>, - - /// Queue of requests. Set to `Some` at initialization, then extracted by the network. - /// - /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method - /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in - /// asynchronous Rust at the moment - requests_queue: - Mutex>>>, - - /// Sending side of `requests_queue`. - requests_send: TracingUnboundedSender>, -} - -#[derive(Debug, thiserror::Error)] -#[error("AlwaysBadChecker")] -struct ErrorAlwaysBadChecker; - -impl Into for ErrorAlwaysBadChecker { - fn into(self) -> ClientError { - ClientError::Application(Box::new(self)) - } -} - -/// Dummy implementation of `FetchChecker` that always assumes that responses are bad. -/// -/// Considering that it is the responsibility of the client to build the fetcher, it can use this -/// implementation if it knows that it will never perform any request. 
-#[derive(Default, Clone)] -pub struct AlwaysBadChecker; - -impl FetchChecker for AlwaysBadChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - _remote_header: Option, - _remote_proof: StorageProof, - ) -> Result { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_read_proof( - &self, - _request: &RemoteReadRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_read_child_proof( - &self, - _request: &RemoteReadChildRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_execution_proof( - &self, - _request: &RemoteCallRequest, - _remote_proof: StorageProof, - ) -> Result, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_changes_proof( - &self, - _request: &RemoteChangesRequest, - _remote_proof: ChangesProof, - ) -> Result, u32)>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_body_proof( - &self, - _request: &RemoteBodyRequest, - _body: Vec, - ) -> Result, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } -} - -impl OnDemand -where - B::Header: HeaderT, -{ - /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { - let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); - let requests_queue = Mutex::new(Some(requests_queue)); - - Self { checker, requests_queue, requests_send } - } - - /// Get checker reference. - pub fn checker(&self) -> &Arc> { - &self.checker - } - - /// Extracts the queue of requests. - /// - /// Whenever one of the methods of the `Fetcher` trait is called, an element is pushed on this - /// channel. - /// - /// If this function returns `None`, that means that the receiver has already been extracted in - /// the past, and therefore that something already handles the requests. 
- pub(crate) fn extract_receiver( - &self, - ) -> Option>> { - self.requests_queue.lock().take() - } -} - -impl Fetcher for OnDemand -where - B: BlockT, - B::Header: HeaderT, -{ - type RemoteHeaderResult = RemoteResponse; - type RemoteReadResult = RemoteResponse, Option>>>; - type RemoteCallResult = RemoteResponse>; - type RemoteChangesResult = RemoteResponse, u32)>>; - type RemoteBodyResult = RemoteResponse>; - - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Header { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Read { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read_child( - &self, - request: RemoteReadChildRequest, - ) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::ReadChild { request, sender }); - RemoteResponse { receiver } - } - - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Call { request, sender }); - RemoteResponse { receiver } - } - - fn remote_changes( - &self, - request: RemoteChangesRequest, - ) -> Self::RemoteChangesResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Changes { request, sender }); - RemoteResponse { receiver } - } - - fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Body { request, sender }); - RemoteResponse { receiver } - } -} - -/// Future for an on-demand remote call response. 
-pub struct RemoteResponse { - receiver: oneshot::Receiver>, -} - -impl Future for RemoteResponse { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match self.receiver.poll_unpin(cx) { - Poll::Ready(Ok(res)) => Poll::Ready(res), - Poll::Ready(Err(_)) => Poll::Ready(Err(ClientError::RemoteFetchCancelled)), - Poll::Pending => Poll::Pending, - } - } -} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 90e647505fa1f..caf4db89f653a 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -33,11 +33,9 @@ use crate::{ config::{parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, - light_client_requests, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - on_demand_layer::AlwaysBadChecker, protocol::{ self, event::Event, @@ -238,12 +236,6 @@ impl NetworkWorker { } })?; - let checker = params - .on_demand - .as_ref() - .map(|od| od.checker().clone()) - .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); - let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -255,14 +247,6 @@ impl NetworkWorker { params.network_config.client_version, params.network_config.node_name ); - let light_client_request_sender = { - light_client_requests::sender::LightClientRequestSender::new( - ¶ms.protocol_id, - checker, - peerset_handle.clone(), - ) - }; - let discovery_config = { let mut config = DiscoveryConfig::new(local_public.clone()); config.with_permanent_addresses(known_addresses); @@ -347,7 +331,6 @@ impl NetworkWorker { protocol, user_agent, local_public, - light_client_request_sender, discovery_config, params.block_request_protocol_config, params.state_request_protocol_config, @@ -447,7 +430,6 @@ impl NetworkWorker { service, import_queue: params.import_queue, from_service, - light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, tx_handler_controller, @@ -1464,8 +1446,6 @@ pub struct NetworkWorker { import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, - /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1489,23 +1469,6 @@ impl Future for NetworkWorker { this.import_queue .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); - // Check for new incoming light client requests. - if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { - while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - let result = this.network_service.behaviour_mut().light_client_request(rq); - match result { - Ok(()) => {}, - Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { - warn!("Couldn't start light client request: too many pending requests"); - }, - } - - if let Some(metrics) = this.metrics.as_ref() { - metrics.issued_light_requests.inc(); - } - } - } - // At the time of writing of this comment, due to a high volume of messages, the network // worker sometimes takes a long time to process the loop below. When that happens, the // rest of the polling is frozen. 
In order to avoid negative side-effects caused by this diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 69b172d07edfe..1c66986e422fc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -116,7 +116,6 @@ fn build_test_full_node( }), network_config: config, chain: client.clone(), - on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), protocol_id, import_queue, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 084b09fd65f8f..cfd42327b6003 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -48,13 +48,13 @@ use sc_consensus::{ }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - block_request_handler::{self, BlockRequestHandler}, + block_request_handler::BlockRequestHandler, config::{ MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, ProtocolConfig, ProtocolId, Role, SyncMode, TransportConfig, }, - light_client_requests::{self, handler::LightClientRequestHandler}, - state_request_handler::{self, StateRequestHandler}, + light_client_requests::handler::LightClientRequestHandler, + state_request_handler::StateRequestHandler, warp_request_handler, Multiaddr, NetworkService, NetworkWorker, }; use sc_service::client::Client; @@ -133,25 +133,20 @@ pub type PeersFullClient = Client< Block, substrate_test_runtime_client::runtime::RuntimeApi, >; -pub type PeersLightClient = Client< - substrate_test_runtime_client::LightBackend, - substrate_test_runtime_client::LightExecutor, - Block, - substrate_test_runtime_client::runtime::RuntimeApi, ->; #[derive(Clone)] -pub enum PeersClient { - Full(Arc, Arc), - Light(Arc, Arc), +pub struct PeersClient { + client: Arc, + backend: Arc, } impl PeersClient { - pub fn as_full(&self) -> Option> { - match *self { - PeersClient::Full(ref client, _) => Some(client.clone()), - _ => None, - } + pub fn as_client(&self) -> Arc { + self.client.clone() + } + + pub fn as_backend(&self) -> Arc { + self.backend.clone() } pub fn as_block_import(&self) -> BlockImportAdapter { @@ -159,27 +154,18 @@ impl PeersClient { } pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { - match *self { - PeersClient::Full(ref client, _) => client.get_aux(key), - PeersClient::Light(ref client, _) => client.get_aux(key), - } + self.client.get_aux(key) } pub fn info(&self) -> BlockchainInfo { - match *self { - PeersClient::Full(ref client, _) => client.chain_info(), - PeersClient::Light(ref client, _) => client.chain_info(), - } + self.client.info() } pub fn header( &self, block: &BlockId, ) -> ClientResult::Header>> { - match *self { - PeersClient::Full(ref client, _) => client.header(block), - PeersClient::Light(ref client, _) => client.header(block), - } + self.client.header(block) } pub fn has_state_at(&self, block: &BlockId) -> bool { @@ -187,33 +173,19 @@ impl PeersClient { Some(header) => header, None => return false, }; - match self { - PeersClient::Full(_client, backend) => - backend.have_state_at(&header.hash(), *header.number()), - PeersClient::Light(_client, backend) => - backend.have_state_at(&header.hash(), *header.number()), - } + self.backend.have_state_at(&header.hash(), *header.number()) } pub fn justifications(&self, block: &BlockId) -> ClientResult> { - match *self { - PeersClient::Full(ref client, _) => client.justifications(block), - PeersClient::Light(ref client, _) => client.justifications(block), - } + self.client.justifications(block) } pub fn 
finality_notification_stream(&self) -> FinalityNotifications { - match *self { - PeersClient::Full(ref client, _) => client.finality_notification_stream(), - PeersClient::Light(ref client, _) => client.finality_notification_stream(), - } + self.client.finality_notification_stream() } pub fn import_notification_stream(&self) -> ImportNotifications { - match *self { - PeersClient::Full(ref client, _) => client.import_notification_stream(), - PeersClient::Light(ref client, _) => client.import_notification_stream(), - } + self.client.import_notification_stream() } pub fn finalize_block( @@ -222,12 +194,7 @@ impl PeersClient { justification: Option, notify: bool, ) -> ClientResult<()> { - match *self { - PeersClient::Full(ref client, ref _backend) => - client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => - client.finalize_block(id, justification, notify), - } + self.client.finalize_block(id, justification, notify) } } @@ -240,10 +207,7 @@ impl BlockImport for PeersClient { &mut self, block: BlockCheckParams, ) -> Result { - match self { - PeersClient::Full(client, _) => client.check_block(block).await, - PeersClient::Light(client, _) => client.check_block(block).await, - } + self.client.check_block(block).await } async fn import_block( @@ -251,12 +215,7 @@ impl BlockImport for PeersClient { block: BlockImportParams, cache: HashMap>, ) -> Result { - match self { - PeersClient::Full(client, _) => - client.import_block(block.clear_storage_changes_and_mutate(), cache).await, - PeersClient::Light(client, _) => - client.import_block(block.clear_storage_changes_and_mutate(), cache).await, - } + self.client.import_block(block.clear_storage_changes_and_mutate(), cache).await } } @@ -370,8 +329,7 @@ where BlockBuilder, ) -> Block, { - let full_client = - self.client.as_full().expect("blocks could only be generated by full clients"); + let full_client = self.client.as_client(); let mut at = full_client.header(&at).unwrap().unwrap().hash(); for _ in 0..count { let builder = @@ -779,11 +737,11 @@ where let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); - let (block_import, justification_import, data) = - self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + let (block_import, justification_import, data) = self + .make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); let verifier = self.make_verifier( - PeersClient::Full(client.clone(), backend.clone()), + PeersClient { client: client.clone(), backend: backend.clone() }, &Default::default(), &data, ); @@ -868,7 +826,6 @@ where }), network_config, chain: client.clone(), - on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, import_queue, @@ -899,7 +856,7 @@ where peers.push(Peer { data, - client: PeersClient::Full(client.clone(), backend.clone()), + client: PeersClient { client: client.clone(), backend: backend.clone() }, select_chain: Some(longest_chain), backend: Some(backend), imported_blocks_stream, @@ -912,94 +869,6 @@ where }); } - /// Add a light peer. 
- fn add_light_peer(&mut self) { - let (c, backend) = substrate_test_runtime_client::new_light(); - let client = Arc::new(c); - let (block_import, justification_import, data) = - self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Light(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(verifier); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - &sp_core::testing::TaskExecutor::new(), - None, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = - NetworkConfiguration::new("test-node", "test-client", Default::default(), None); - network_config.transport = TransportConfig::MemoryOnly; - network_config.listen_addresses = vec![listen_addr.clone()]; - network_config.allow_non_globals_in_dht = true; - - let protocol_id = ProtocolId::from("test-protocol-name"); - - let block_request_protocol_config = - block_request_handler::generate_protocol_config(&protocol_id); - let state_request_protocol_config = - state_request_handler::generate_protocol_config(&protocol_id); - - let light_client_request_protocol_config = - light_client_requests::generate_protocol_config(&protocol_id); - - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Light, - executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), - network_config, - chain: client.clone(), - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id, - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator), - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync: None, - }) - .unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - peer.network.add_known_address( - network.service().local_peer_id().clone(), - listen_addr.clone(), - ); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = - Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - verifier, - select_chain: None, - backend: None, - block_import, - client: PeersClient::Light(client, backend), - imported_blocks_stream, - finality_notification_stream, - network, - listen_addr, - }); - }); - } - /// Used to spawn background tasks, e.g. the block request protocol handler. fn spawn_task(&self, f: BoxFuture<'static, ()>) { async_std::task::spawn(f); diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index ff62b5476d1e6..4828cae842218 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -20,7 +20,6 @@ use super::*; use futures::{executor::block_on, Future}; use sp_consensus::{block_validation::Validation, BlockOrigin}; use sp_runtime::Justifications; -use std::time::Duration; use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { @@ -391,35 +390,6 @@ fn own_blocks_are_announced() { (net.peers()[2].blockchain_canon_equals(peer0)); } -#[test] -fn blocks_are_not_announced_by_light_nodes() { - sp_tracing::try_init_simple(); - let mut net = TestNet::new(0); - - // full peer0 is connected to light peer - // light peer1 is connected to full peer2 - net.add_full_peer(); - net.add_light_peer(); - - // Sync between 0 and 1. 
- net.peer(0).push_blocks(1, false); - assert_eq!(net.peer(0).client.info().best_number, 1); - net.block_until_sync(); - assert_eq!(net.peer(1).client.info().best_number, 1); - - // Add another node and remove node 0. - net.add_full_peer(); - net.peers.remove(0); - - // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. - let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut delay).poll(cx) - })); - assert_eq!(net.peer(1).client.info().best_number, 0); -} - #[test] fn can_sync_small_non_best_forks() { sp_tracing::try_init_simple(); @@ -483,72 +453,6 @@ fn can_sync_small_non_best_forks() { })); } -#[test] -fn can_not_sync_from_light_peer() { - sp_tracing::try_init_simple(); - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // generate some blocks on #0 - net.peer(0).push_blocks(1, false); - - // and let the light client sync from this node - net.block_until_sync(); - - // ensure #0 && #1 have the same best block - let full0_info = net.peer(0).client.info(); - let light_info = net.peer(1).client.info(); - assert_eq!(full0_info.best_number, 1); - assert_eq!(light_info.best_number, 1); - assert_eq!(light_info.best_hash, full0_info.best_hash); - - // add new full client (#2) && remove #0 - net.add_full_peer(); - net.peers.remove(0); - - // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds - let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut test_finished).poll(cx) - })); -} - -#[test] -fn light_peer_imports_header_from_announce() { - sp_tracing::try_init_simple(); - - fn import_with_announce(net: &mut TestNet, hash: H256) { - net.peer(0).announce_block(hash, None); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { - Poll::Ready(()) - } else { - Poll::Pending - } - })); - } - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // let them connect to each other - net.block_until_sync(); - - // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); - import_with_announce(&mut net, new_hash); - - // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); - import_with_announce(&mut net, known_stale_hash); -} - #[test] fn can_sync_explicit_forks() { sp_tracing::try_init_simple(); @@ -1210,16 +1114,14 @@ fn syncs_indexed_blocks() { assert!(net .peer(0) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_some()); assert!(net .peer(1) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_none()); @@ -1228,8 +1130,7 @@ fn syncs_indexed_blocks() { assert!(net .peer(1) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_some()); diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs deleted file mode 100644 index 2d15c819e1dab..0000000000000 --- a/client/rpc/src/chain/chain_light.rs +++ /dev/null @@ -1,114 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Blockchain API backend for light nodes. - -use futures::{future::ready, FutureExt, TryFutureExt}; -use jsonrpc_pubsub::manager::SubscriptionManager; -use std::sync::Arc; - -use sc_client_api::light::{Fetcher, RemoteBlockchain, RemoteBodyRequest}; -use sp_runtime::{ - generic::{BlockId, SignedBlock}, - traits::Block as BlockT, -}; - -use super::{client_err, error::FutureResult, ChainBackend}; -use sc_client_api::BlockchainEvents; -use sp_blockchain::HeaderBackend; - -/// Blockchain API backend for light nodes. Reads all the data from local -/// database, if available, or fetches it from remote node otherwise. -pub struct LightChain { - /// Substrate client. - client: Arc, - /// Current subscriptions. - subscriptions: SubscriptionManager, - /// Remote blockchain reference - remote_blockchain: Arc>, - /// Remote fetcher reference. - fetcher: Arc, -} - -impl> LightChain { - /// Create new Chain API RPC handler. - pub fn new( - client: Arc, - subscriptions: SubscriptionManager, - remote_blockchain: Arc>, - fetcher: Arc, - ) -> Self { - Self { client, subscriptions, remote_blockchain, fetcher } - } -} - -impl ChainBackend for LightChain -where - Block: BlockT + 'static, - Block::Header: Unpin, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + Send + Sync + 'static, -{ - fn client(&self) -> &Arc { - &self.client - } - - fn subscriptions(&self) -> &SubscriptionManager { - &self.subscriptions - } - - fn header(&self, hash: Option) -> FutureResult> { - let hash = self.unwrap_or_best(hash); - - let fetcher = self.fetcher.clone(); - let maybe_header = sc_client_api::light::future_header( - &*self.remote_blockchain, - &*fetcher, - BlockId::Hash(hash), - ); - - maybe_header.then(move |result| ready(result.map_err(client_err))).boxed() - } - - fn block(&self, hash: Option) -> FutureResult>> { - let fetcher = self.fetcher.clone(); - self.header(hash) - .and_then(move |header| async move { - match header { - Some(header) => { - let body = fetcher - .remote_body(RemoteBodyRequest { - header: header.clone(), - retry_count: Default::default(), - }) - .await; - - body.map(|body| { - Some(SignedBlock { - block: Block::new(header, body), - justifications: None, - }) - }) - .map_err(client_err) - }, - None => Ok(None), - } - }) - .boxed() - } -} diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index 9428ac3248f32..c2f512c338b11 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -19,7 +19,6 @@ //! Substrate blockchain API. 
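The `LightChain::block` implementation removed above chains two fallible asynchronous steps: resolve the header, then fetch the body from a remote peer and reassemble the signed block, mapping a missing header to `Ok(None)`. The same shape reduced to a stand-alone sketch; the fetch functions and the `(header, body)` tuple standing in for `SignedBlock` are hypothetical:

use futures::executor::block_on;

// Hypothetical stand-ins for `Self::header` and `Fetcher::remote_body`.
async fn fetch_header(hash: u64) -> Result<Option<String>, String> {
    Ok(Some(format!("header-{}", hash)))
}

async fn fetch_body(header: &str) -> Result<Vec<u8>, String> {
    Ok(header.as_bytes().to_vec())
}

// Mirrors the removed `LightChain::block`: no header means `Ok(None)`;
// otherwise the body is fetched remotely and the block is reassembled.
async fn block(hash: u64) -> Result<Option<(String, Vec<u8>)>, String> {
    match fetch_header(hash).await? {
        Some(header) => {
            let body = fetch_body(&header).await?;
            Ok(Some((header, body)))
        },
        None => Ok(None),
    }
}

fn main() {
    assert!(block_on(block(1)).unwrap().is_some());
}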
mod chain_full; -mod chain_light; #[cfg(test)] mod tests; @@ -33,10 +32,7 @@ use rpc::{ use std::sync::Arc; use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; -use sc_client_api::{ - light::{Fetcher, RemoteBlockchain}, - BlockchainEvents, -}; +use sc_client_api::BlockchainEvents; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ generic::{BlockId, SignedBlock}, @@ -204,29 +200,6 @@ where Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) } } -/// Create new state API that works on light node. -pub fn new_light>( - client: Arc, - subscriptions: SubscriptionManager, - remote_blockchain: Arc>, - fetcher: Arc, -) -> Chain -where - Block: BlockT + 'static, - Block::Header: Unpin, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, - F: Send + Sync + 'static, -{ - Chain { - backend: Box::new(self::chain_light::LightChain::new( - client, - subscriptions, - remote_blockchain, - fetcher, - )), - } -} - /// Chain API with subscriptions support. pub struct Chain { backend: Box>, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 97f77a4077962..d3b41d2f18279 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -26,11 +26,7 @@ use futures::{ use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; use rpc::Result as RpcResult; -use std::{ - collections::{BTreeMap, HashMap}, - ops::Range, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use sc_rpc_api::state::ReadProof; use sp_blockchain::{ @@ -43,10 +39,7 @@ use sp_core::{ }, Bytes, }; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion}, -}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_version::RuntimeVersion; use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; @@ -66,14 +59,6 @@ use std::marker::PhantomData; struct QueryStorageRange { /// Hashes of all the blocks in the range. pub hashes: Vec, - /// Number of the first block in the range. - pub first_number: NumberFor, - /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at - /// each state to get changes. - pub unfiltered_range: Range, - /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter - /// blocks-with-changes by using changes tries. - pub filtered_range: Option>, } /// State API backend for full nodes. @@ -107,10 +92,8 @@ where Ok(hash.unwrap_or_else(|| self.client.info().best_hash)) } - /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. - /// Blocks that contain changes within filtered subrange could be filtered using changes tries. - /// Blocks that contain changes within unfiltered subrange must be filtered manually. - fn split_query_storage_range( + /// Validates block range. 
+	fn query_storage_range(
 		&self,
 		from: Block::Hash,
 		to: Option,
@@ -156,23 +139,7 @@ where
 			hashes
 		};

-		// check if we can filter blocks-with-changes from some (sub)range using changes tries
-		let changes_trie_range = self
-			.client
-			.max_key_changes_range(from_number, BlockId::Hash(to_meta.hash))
-			.map_err(client_err)?;
-		let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| {
-			// avoids a corner case where begin < from_number (happens when querying genesis)
-			begin.checked_sub(&from_number).map(|x| x.saturated_into::())
-		});
-		let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin);
-
-		Ok(QueryStorageRange {
-			hashes,
-			first_number: from_number,
-			unfiltered_range,
-			filtered_range,
-		})
+		Ok(QueryStorageRange { hashes })
 	}

 	/// Iterates through range.unfiltered_range and check each block for changes of keys' values.
@@ -183,8 +150,8 @@ where
 		last_values: &mut HashMap>,
 		changes: &mut Vec>,
 	) -> Result<()> {
-		for block in range.unfiltered_range.start..range.unfiltered_range.end {
-			let block_hash = range.hashes[block].clone();
+		for block_hash in &range.hashes {
+			let block_hash = block_hash.clone();
 			let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() };
 			let id = BlockId::hash(block_hash);
@@ -207,57 +174,6 @@ where
 		}
 		Ok(())
 	}
-
-	/// Iterates through all blocks that are changing keys within range.filtered_range and collects
-	/// these changes.
-	fn query_storage_filtered(
-		&self,
-		range: &QueryStorageRange,
-		keys: &[StorageKey],
-		last_values: &HashMap>,
-		changes: &mut Vec>,
-	) -> Result<()> {
-		let (begin, end) = match range.filtered_range {
-			Some(ref filtered_range) => (
-				range.first_number + filtered_range.start.saturated_into(),
-				BlockId::Hash(range.hashes[filtered_range.end - 1].clone()),
-			),
-			None => return Ok(()),
-		};
-		let mut changes_map: BTreeMap, StorageChangeSet> =
-			BTreeMap::new();
-		for key in keys {
-			let mut last_block = None;
-			let mut last_value = last_values.get(key).cloned().unwrap_or_default();
-			let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?;
-			for (block, _) in key_changes.into_iter().rev() {
-				if last_block == Some(block) {
-					continue
-				}
-
-				let block_hash =
-					range.hashes[(block - range.first_number).saturated_into::()].clone();
-				let id = BlockId::Hash(block_hash);
-				let value_at_block = self.client.storage(&id, key).map_err(client_err)?;
-				if last_value == value_at_block {
-					continue
-				}
-
-				changes_map
-					.entry(block)
-					.or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() })
-					.changes
-					.push((key.clone(), value_at_block.clone()));
-				last_block = Some(block);
-				last_value = value_at_block;
-			}
-		}
-		if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) {
-			changes.reserve(additional_capacity);
-		}
-		changes.extend(changes_map.into_iter().map(|(_, cs)| cs));
-		Ok(())
-	}
 }

 impl StateBackend for FullState
@@ -430,11 +346,10 @@ where
 		keys: Vec,
 	) -> FutureResult>> {
 		let call_fn = move || {
-			let range = self.split_query_storage_range(from, to)?;
+			let range = self.query_storage_range(from, to)?;
 			let mut changes = Vec::new();
 			let mut last_values = HashMap::new();
 			self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
-			self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?;
 			Ok(changes)
 		};

@@ -768,30 +683,6 @@ where
 	}
 }

-/// Splits passed range into two subranges where:
-/// - first range has at least one element in it;
-/// - second range (optionally) starts at given `middle` element.
-pub(crate) fn split_range(
-	size: usize,
-	middle: Option,
-) -> (Range, Option>) {
-	// check if we can filter blocks-with-changes from some (sub)range using changes tries
-	let range2_begin = match middle {
-		// some of required changes tries are pruned => use available tries
-		Some(middle) if middle != 0 => Some(middle),
-		// all required changes tries are available, but we still want values at first block
-		// => do 'unfiltered' read for the first block and 'filtered' for the rest
-		Some(_) if size > 1 => Some(1),
-		// range contains single element => do not use changes tries
-		Some(_) => None,
-		// changes tries are not available => do 'unfiltered' read for the whole range
-		None => None,
-	};
-	let range1 = 0..range2_begin.unwrap_or(size);
-	let range2 = range2_begin.map(|begin| begin..size);
-	(range1, range2)
-}
-
 fn invalid_block_range(
 	from: &CachedHeaderMetadata,
 	to: &CachedHeaderMetadata,
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs
index 712fe00c54386..d360701c88b2a 100644
--- a/client/rpc/src/state/tests.rs
+++ b/client/rpc/src/state/tests.rs
@@ -17,16 +17,15 @@
 // along with this program. If not, see .

 use self::error::Error;
-use super::{state_full::split_range, *};
+use super::*;
 use crate::testing::TaskExecutor;
 use assert_matches::assert_matches;
 use futures::{executor, StreamExt};
 use sc_block_builder::BlockBuilderProvider;
 use sc_rpc_api::DenyUnsafe;
 use sp_consensus::BlockOrigin;
-use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration};
+use sp_core::{hash::H256, storage::ChildInfo};
 use sp_io::hashing::blake2_256;
-use sp_runtime::generic::BlockId;
 use std::sync::Arc;
 use substrate_test_runtime_client::{prelude::*, runtime};

@@ -336,7 +335,7 @@ fn should_send_initial_storage_changes_and_notifications() {

 #[test]
 fn should_query_storage() {
-	fn run_tests(mut client: Arc, has_changes_trie_config: bool) {
+	fn run_tests(mut client: Arc) {
 		let (api, _child) = new_full(
 			client.clone(),
 			SubscriptionManager::new(Arc::new(TaskExecutor)),
@@ -369,13 +368,6 @@ fn should_query_storage() {
 		let block2_hash = add_block(1);
 		let genesis_hash = client.genesis_hash();

-		if has_changes_trie_config {
-			assert_eq!(
-				client.max_key_changes_range(1, BlockId::Hash(block1_hash)).unwrap(),
-				Some((0, BlockId::Hash(block1_hash))),
-			);
-		}
-
 		let mut expected = vec![
 			StorageChangeSet {
 				block: genesis_hash,
@@ -519,24 +511,8 @@ fn should_query_storage() {
 		);
 	}

-	run_tests(Arc::new(substrate_test_runtime_client::new()), false);
-	run_tests(
-		Arc::new(
-			TestClientBuilder::new()
-				.changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2)))
-				.build(),
-		),
-		true,
-	);
-}
-
-#[test]
-fn should_split_ranges() {
-	assert_eq!(split_range(1, None), (0..1, None));
-	assert_eq!(split_range(100, None), (0..100, None));
-	assert_eq!(split_range(1, Some(0)), (0..1, None));
-	assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100)));
-	assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100)));
+	run_tests(Arc::new(substrate_test_runtime_client::new()));
+	run_tests(Arc::new(TestClientBuilder::new().build()));
 }

 #[test]
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs
index 88ba6282b5f4e..718cfce981f1a 100644
--- a/client/service/src/builder.rs
+++ b/client/service/src/builder.rs
@@ -30,9 +30,8 @@ use log::info;
 use prometheus_endpoint::Registry;
 use sc_chain_spec::get_extension;
 use sc_client_api::{
-	execution_extensions::ExecutionExtensions, light::RemoteBlockchain,
-	proof_provider::ProofProvider, BadBlocks, BlockBackend, BlockchainEvents, ExecutorProvider,
-	ForkBlocks, StorageProvider, UsageProvider,
+	execution_extensions::ExecutionExtensions, proof_provider::ProofProvider, BadBlocks,
+	BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider,
 };
 use sc_client_db::{Backend, DatabaseSettings};
 use sc_consensus::import_queue::ImportQueue;
@@ -40,7 +39,7 @@ use sc_executor::RuntimeVersionOf;
 use sc_keystore::LocalKeystore;
 use sc_network::{
 	block_request_handler::{self, BlockRequestHandler},
-	config::{OnDemand, Role, SyncMode},
+	config::{Role, SyncMode},
 	light_client_requests::{self, handler::LightClientRequestHandler},
 	state_request_handler::{self, StateRequestHandler},
 	warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider},
@@ -381,23 +380,19 @@ where
 pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> {
 	/// The service configuration.
 	pub config: Configuration,
-	/// A shared client returned by `new_full_parts`/`new_light_parts`.
+	/// A shared client returned by `new_full_parts`.
 	pub client: Arc,
-	/// A shared backend returned by `new_full_parts`/`new_light_parts`.
+	/// A shared backend returned by `new_full_parts`.
 	pub backend: Arc,
-	/// A task manager returned by `new_full_parts`/`new_light_parts`.
+	/// A task manager returned by `new_full_parts`.
 	pub task_manager: &'a mut TaskManager,
-	/// A shared keystore returned by `new_full_parts`/`new_light_parts`.
+	/// A shared keystore returned by `new_full_parts`.
 	pub keystore: SyncCryptoStorePtr,
-	/// An optional, shared data fetcher for light clients.
-	pub on_demand: Option>>,
 	/// A shared transaction pool.
 	pub transaction_pool: Arc,
 	/// A RPC extension builder. Use `NoopRpcExtensionBuilder` if you just want to pass in the
 	/// extensions directly.
 	pub rpc_extensions_builder: Box + Send>,
-	/// An optional, shared remote blockchain instance. Used for light clients.
-	pub remote_blockchain: Option>>,
 	/// A shared network instance.
 	pub network: Arc::Hash>>,
 	/// A Sender for RPC requests.
@@ -475,12 +470,10 @@ where
 		mut config,
 		task_manager,
 		client,
-		on_demand: _,
 		backend,
 		keystore,
 		transaction_pool,
 		rpc_extensions_builder,
-		remote_blockchain: _,
 		network,
 		system_rpc_tx,
 		telemetry,
@@ -725,7 +718,7 @@ where
 pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> {
 	/// The service configuration.
 	pub config: &'a Configuration,
-	/// A shared client returned by `new_full_parts`/`new_light_parts`.
+	/// A shared client returned by `new_full_parts`.
 	pub client: Arc,
 	/// A shared transaction pool.
 	pub transaction_pool: Arc,
 	/// A handle for spawning tasks.
 	pub spawn_handle: SpawnTaskHandle,
 	/// An import queue.
 	pub import_queue: TImpQu,
-	/// An optional, shared data fetcher for light clients.
-	pub on_demand: Option>>,
 	/// A block announce validator builder.
 	pub block_announce_validator_builder: Option) -> Box + Send> + Send>>,
@@ -773,7 +764,6 @@ where
 		transaction_pool,
 		spawn_handle,
 		import_queue,
-		on_demand,
 		block_announce_validator_builder,
 		warp_sync,
 	} = params;
@@ -869,7 +859,6 @@ where
 		},
 		network_config: config.network.clone(),
 		chain: client.clone(),
-		on_demand,
 		transaction_pool: transaction_pool_adapter as _,
 		import_queue: Box::new(import_queue),
 		protocol_id,
diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs
index d7a8b6f227e8f..be871cc371ed5 100644
--- a/client/service/src/client/call_executor.rs
+++ b/client/service/src/client/call_executor.rs
@@ -26,10 +26,7 @@ use sp_core::{
 	NativeOrEncoded, NeverNativeValue,
 };
 use sp_externalities::Extensions;
-use sp_runtime::{
-	generic::BlockId,
-	traits::{Block as BlockT, NumberFor},
-};
+use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 use sp_state_machine::{
 	self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges,
 	StateMachine, StorageProof,
 };
@@ -153,8 +150,6 @@ where
 		extensions: Option,
 	) -> sp_blockchain::Result> {
 		let mut changes = OverlayedChanges::default();
-		let changes_trie =
-			backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?;
 		let state = self.backend.state_at(*at)?;
 		let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state);
 		let runtime_code =
@@ -168,7 +163,6 @@ where
 		let return_data = StateMachine::new(
 			&state,
-			changes_trie,
 			&mut changes,
 			&self.executor,
 			method,
@@ -208,8 +202,6 @@ where
 	where
 		ExecutionManager: Clone,
 	{
-		let changes_trie_state =
-			backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?;
 		let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut());

 		let state = self.backend.state_at(*at)?;
@@ -243,7 +235,6 @@ where
 				let mut state_machine = StateMachine::new(
 					&backend,
-					changes_trie_state,
 					changes,
 					&self.executor,
 					method,
@@ -262,7 +253,6 @@ where
 			None => {
 				let mut state_machine = StateMachine::new(
 					&state,
-					changes_trie_state,
 					changes,
 					&self.executor,
 					method,
@@ -286,11 +276,9 @@ where
 	fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result {
 		let mut overlay = OverlayedChanges::default();
-		let changes_trie_state =
-			backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?;
 		let state = self.backend.state_at(*id)?;
 		let mut cache = StorageTransactionCache::::default();
-		let mut ext = Ext::new(&mut overlay, &mut cache, &state, changes_trie_state, None);
+		let mut ext = Ext::new(&mut overlay, &mut cache, &state, None);
 		let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state);
 		let runtime_code =
 			state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?;
@@ -317,7 +305,7 @@ where
 			state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?;
 		let runtime_code = self.check_override(runtime_code, at)?;

-		sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>(
+		sp_state_machine::prove_execution_on_trie_backend(
 			&trie_backend,
 			&mut Default::default(),
 			&self.executor,
diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs
index 6ce2feb050759..b46c6b99b9a9c 100644
--- a/client/service/src/client/client.rs
+++ b/client/service/src/client/client.rs
@@ -23,7 +23,6 @@ use super::{
 	genesis,
 };
 use codec::{Decode, Encode};
-use hash_db::Prefix;
 use log::{info, trace, warn};
 use parking_lot::{Mutex, RwLock};
 use prometheus_endpoint::Registry;
@@ -31,18 +30,15 @@
 use rand::Rng;
 use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof};
 use sc_client_api::{
 	backend::{
-		self, apply_aux, changes_tries_state_at_block, BlockImportOperation, ClientImportOperation,
-		Finalizer, ImportSummary, LockImportRun, NewBlockState, PrunableStateChangesTrieStorage,
-		StorageProvider,
+		self, apply_aux, BlockImportOperation, ClientImportOperation, Finalizer, ImportSummary,
+		LockImportRun, NewBlockState, StorageProvider,
 	},
-	cht,
 	client::{
 		BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo,
 		FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications,
 		ProvideUncles,
 	},
 	execution_extensions::ExecutionExtensions,
-	light::ChangesProof,
 	notifications::{StorageEventStream, StorageNotifications},
 	CallExecutor, ExecutorProvider, KeyIterator, ProofProvider, UsageProvider,
 };
@@ -56,39 +52,36 @@ use sp_api::{
 	ProvideRuntimeApi,
 };
 use sp_blockchain::{
-	self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, Cache,
-	CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata, ProvideCache,
+	self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend,
+	CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata,
 };
 use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError};

 use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
 use sp_core::{
-	convert_hash,
 	storage::{
 		well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData,
 		StorageKey,
 	},
-	ChangesTrieConfiguration, NativeOrEncoded,
+	NativeOrEncoded,
 };
 #[cfg(feature = "test-helpers")]
 use sp_keystore::SyncCryptoStorePtr;
 use sp_runtime::{
-	generic::{BlockId, DigestItem, SignedBlock},
+	generic::{BlockId, SignedBlock},
 	traits::{
-		Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One,
-		SaturatedConversion, Zero,
+		Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero,
 	},
-	BuildStorage, Justification, Justifications,
+	BuildStorage, Digest, Justification, Justifications,
 };
 use sp_state_machine::{
-	key_changes, key_changes_proof, prove_child_read, prove_range_read_with_child_with_size,
-	prove_read, read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend,
-	ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage,
-	ChangesTrieStorage, DBValue, KeyValueStates, KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH,
+	prove_child_read, prove_range_read_with_child_with_size, prove_read,
+	read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend, KeyValueStates,
+	KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH,
 };
 use sp_trie::{CompactProof, StorageProof};
 use std::{
-	collections::{BTreeMap, HashMap, HashSet},
+	collections::{HashMap, HashSet},
 	marker::PhantomData,
 	panic::UnwindSafe,
 	path::PathBuf,
@@ -413,250 +406,6 @@ where
 		self.executor.runtime_version(id)
 	}

-	/// Reads given header and generates CHT-based header proof for CHT of given size.
-	pub fn header_proof_with_cht_size(
-		&self,
-		id: &BlockId,
-		cht_size: NumberFor,
-	) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
-		let proof_error = || {
-			sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id))
-		};
-		let header = self.backend.blockchain().expect_header(*id)?;
-		let block_num = *header.number();
-		let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?;
-		let cht_start = cht::start_number(cht_size, cht_num);
-		let mut current_num = cht_start;
-		let cht_range = ::std::iter::from_fn(|| {
-			let old_current_num = current_num;
-			current_num = current_num + One::one();
-			Some(old_current_num)
-		});
-		let headers = cht_range.map(|num| self.block_hash(num));
-		let proof = cht::build_proof::, _, _>(
-			cht_size,
-			cht_num,
-			std::iter::once(block_num),
-			headers,
-		)?;
-		Ok((header, proof))
-	}
-
-	/// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size.
-	pub fn key_changes_proof_with_cht_size(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-		cht_size: NumberFor,
-	) -> sp_blockchain::Result> {
-		struct AccessedRootsRecorder<'a, Block: BlockT> {
-			storage: &'a dyn ChangesTrieStorage, NumberFor>,
-			min: NumberFor,
-			required_roots_proofs: Mutex, Block::Hash>>,
-		}
-
-		impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor>
-			for AccessedRootsRecorder<'a, Block>
-		{
-			fn build_anchor(
-				&self,
-				hash: Block::Hash,
-			) -> Result>, String> {
-				self.storage.build_anchor(hash)
-			}
-
-			fn root(
-				&self,
-				anchor: &ChangesTrieAnchorBlockId>,
-				block: NumberFor,
-			) -> Result, String> {
-				let root = self.storage.root(anchor, block)?;
-				if block < self.min {
-					if let Some(ref root) = root {
-						self.required_roots_proofs.lock().insert(block, root.clone());
-					}
-				}
-				Ok(root)
-			}
-		}
-
-		impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor>
-			for AccessedRootsRecorder<'a, Block>
-		{
-			fn as_roots_storage(
-				&self,
-			) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> {
-				self
-			}
-
-			fn with_cached_changed_keys(
-				&self,
-				root: &Block::Hash,
-				functor: &mut dyn FnMut(&HashMap, HashSet>>),
-			) -> bool {
-				self.storage.with_cached_changed_keys(root, functor)
-			}
-
-			fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> {
-				self.storage.get(key, prefix)
-			}
-		}
-
-		let first_number =
-			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?;
-		let (storage, configs) = self.require_changes_trie(first_number, last, true)?;
-		let min_number =
-			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?;
-
-		let recording_storage = AccessedRootsRecorder:: {
-			storage: storage.storage(),
-			min: min_number,
-			required_roots_proofs: Mutex::new(BTreeMap::new()),
-		};
-
-		let max_number = std::cmp::min(
-			self.backend.blockchain().info().best_number,
-			self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?,
-		);
-
-		// fetch key changes proof
-		let mut proof = Vec::new();
-		for (config_zero, config_end, config) in configs {
-			let last_number =
-				self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?;
-			let config_range = ChangesTrieConfigurationRange {
-				config: &config,
-				zero: config_zero,
-				end: config_end.map(|(config_end_number, _)| config_end_number),
-			};
-			let proof_range = key_changes_proof::, _>(
-				config_range,
-				&recording_storage,
-				first_number,
-				&ChangesTrieAnchorBlockId { hash: convert_hash(&last), number: last_number },
-				max_number,
-				storage_key,
-				&key.0,
-			)
-			.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
-			proof.extend(proof_range);
-		}
-
-		// now gather proofs for all changes tries roots that were touched during key_changes_proof
-		// execution AND are unknown (i.e. replaced with CHT) to the requester
-		let roots = recording_storage.required_roots_proofs.into_inner();
-		let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?;
-
-		Ok(ChangesProof {
-			max_block: max_number,
-			proof,
-			roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(),
-			roots_proof,
-		})
-	}
-
-	/// Generate CHT-based proof for roots of changes tries at given blocks.
-	fn changes_trie_roots_proof>>(
-		&self,
-		cht_size: NumberFor,
-		blocks: I,
-	) -> sp_blockchain::Result {
-		// most probably we have touched several changes tries that are parts of the single CHT
-		// => GroupBy changes tries by CHT number and then gather proof for the whole group at once
-		let mut proofs = Vec::new();
-
-		cht::for_each_cht_group::(
-			cht_size,
-			blocks,
-			|_, cht_num, cht_blocks| {
-				let cht_proof =
-					self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?;
-				proofs.push(cht_proof);
-				Ok(())
-			},
-			(),
-		)?;
-
-		Ok(StorageProof::merge(proofs))
-	}
-
-	/// Generates CHT-based proof for roots of changes tries at given blocks
-	/// (that are part of single CHT).
-	fn changes_trie_roots_proof_at_cht(
-		&self,
-		cht_size: NumberFor,
-		cht_num: NumberFor,
-		blocks: Vec>,
-	) -> sp_blockchain::Result {
-		let cht_start = cht::start_number(cht_size, cht_num);
-		let mut current_num = cht_start;
-		let cht_range = ::std::iter::from_fn(|| {
-			let old_current_num = current_num;
-			current_num = current_num + One::one();
-			Some(old_current_num)
-		});
-		let roots = cht_range.map(|num| {
-			self.header(&BlockId::Number(num)).map(|block| {
-				block
-					.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())
-			})
-		});
-		let proof = cht::build_proof::, _, _>(
-			cht_size, cht_num, blocks, roots,
-		)?;
-		Ok(proof)
-	}
-
-	/// Returns changes trie storage and all configurations that have been active
-	/// in the range [first; last].
-	///
-	/// Configurations are returned in descending order (and obviously never overlap).
-	/// If fail_if_disabled is false, returns maximal consequent configurations ranges,
-	/// starting from last and stopping on either first, or when CT have been disabled.
-	/// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled
-	/// inside first..last blocks range.
-	fn require_changes_trie(
-		&self,
-		first: NumberFor,
-		last: Block::Hash,
-		fail_if_disabled: bool,
-	) -> sp_blockchain::Result<(
-		&dyn PrunableStateChangesTrieStorage,
-		Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>,
-	)> {
-		let storage = self
-			.backend
-			.changes_trie_storage()
-			.ok_or_else(|| sp_blockchain::Error::ChangesTriesNotSupported)?;
-
-		let mut configs = Vec::with_capacity(1);
-		let mut current = last;
-		loop {
-			let config_range = storage.configuration_at(&BlockId::Hash(current))?;
-			match config_range.config {
-				Some(config) => configs.push((config_range.zero.0, config_range.end, config)),
-				None if !fail_if_disabled => return Ok((storage, configs)),
-				None => return Err(sp_blockchain::Error::ChangesTriesNotSupported),
-			}
-
-			if config_range.zero.0 < first {
-				break
-			}
-
-			current = *self
-				.backend
-				.blockchain()
-				.expect_header(BlockId::Hash(config_range.zero.1))?
-				.parent_hash();
-		}
-
-		Ok((storage, configs))
-	}
-
 	/// Apply a checked and validated block to an operation. If a justification is provided
 	/// then `finalized` *must* be true.
 	fn apply_block(
@@ -811,7 +560,7 @@ where
 			sc_consensus::StorageChanges::Changes(storage_changes) => {
 				self.backend
 					.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?;
-				let (main_sc, child_sc, offchain_sc, tx, _, changes_trie_tx, tx_index) =
+				let (main_sc, child_sc, offchain_sc, tx, _, tx_index) =
 					storage_changes.into_inner();

 				if self.config.offchain_indexing_api {
@@ -822,9 +571,6 @@ where
 				operation.op.update_storage(main_sc.clone(), child_sc.clone())?;
 				operation.op.update_transaction_index(tx_index)?;

-				if let Some(changes_trie_transaction) = changes_trie_tx {
-					operation.op.update_changes_trie(changes_trie_transaction)?;
-				}
 				Some((main_sc, child_sc))
 			},
 			sc_consensus::StorageChanges::Import(changes) => {
@@ -1003,11 +749,8 @@ where
 		)?;

 		let state = self.backend.state_at(at)?;
-		let changes_trie_state =
-			changes_tries_state_at_block(&at, self.backend.changes_trie_storage())?;
-
 		let gen_storage_changes = runtime_api
-			.into_storage_changes(&state, changes_trie_state.as_ref(), *parent_hash)
+			.into_storage_changes(&state, *parent_hash)
 			.map_err(sp_blockchain::Error::Storage)?;

 		if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root
@@ -1356,25 +1099,6 @@ where
 			.map(|(r, p)| (r, StorageProof::merge(vec![p, code_proof])))
 	}

-	fn header_proof(
-		&self,
-		id: &BlockId,
-	) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
-		self.header_proof_with_cht_size(id, cht::size())
-	}
-
-	fn key_changes_proof(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-	) -> sp_blockchain::Result> {
-		self.key_changes_proof_with_cht_size(first, last, min, max, storage_key, key, cht::size())
-	}
-
 	fn read_proof_collection(
 		&self,
 		id: &BlockId,
@@ -1540,7 +1264,7 @@ where
 	fn new_block_at>(
 		&self,
 		parent: &BlockId,
-		inherent_digests: DigestFor,
+		inherent_digests: Digest,
 		record_proof: R,
 	) -> sp_blockchain::Result> {
 		sc_block_builder::BlockBuilder::new(
@@ -1555,7 +1279,7 @@ where

 	fn new_block(
 		&self,
-		inherent_digests: DigestFor,
+		inherent_digests: Digest,
 	) -> sp_blockchain::Result> {
 		let info = self.chain_info();
 		sc_block_builder::BlockBuilder::new(
@@ -1703,89 +1427,6 @@ where
 			.child_storage_hash(child_info, &key.0)
 			.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?)
 	}
-
-	fn max_key_changes_range(
-		&self,
-		first: NumberFor,
-		last: BlockId,
-	) -> sp_blockchain::Result, BlockId)>> {
-		let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
-		let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
-		if first > last_number {
-			return Err(sp_blockchain::Error::ChangesTrieAccessFailed(
-				"Invalid changes trie range".into(),
-			))
-		}
-
-		let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() {
-			Some((storage, configs)) => (storage, configs),
-			None => return Ok(None),
-		};
-
-		let first_available_changes_trie = configs.last().map(|config| config.0);
-		match first_available_changes_trie {
-			Some(first_available_changes_trie) => {
-				let oldest_unpruned = storage.oldest_pruned_digest_range_end();
-				let first = std::cmp::max(first_available_changes_trie, oldest_unpruned);
-				Ok(Some((first, last)))
-			},
-			None => Ok(None),
-		}
-	}
-
-	fn key_changes(
-		&self,
-		first: NumberFor,
-		last: BlockId,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-	) -> sp_blockchain::Result, u32)>> {
-		let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
-		let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
-		let (storage, configs) = self.require_changes_trie(first, last_hash, true)?;
-
-		let mut result = Vec::new();
-		let best_number = self.backend.blockchain().info().best_number;
-		for (config_zero, config_end, config) in configs {
-			let range_first = ::std::cmp::max(first, config_zero + One::one());
-			let range_anchor = match config_end {
-				Some((config_end_number, config_end_hash)) =>
-					if last_number > config_end_number {
-						ChangesTrieAnchorBlockId {
-							hash: config_end_hash,
-							number: config_end_number,
-						}
-					} else {
-						ChangesTrieAnchorBlockId {
-							hash: convert_hash(&last_hash),
-							number: last_number,
-						}
-					},
-				None =>
-					ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number },
-			};
-
-			let config_range = ChangesTrieConfigurationRange {
-				config: &config,
-				zero: config_zero.clone(),
-				end: config_end.map(|(config_end_number, _)| config_end_number),
-			};
-			let result_range: Vec<(NumberFor, u32)> = key_changes::, _>(
-				config_range,
-				storage.storage(),
-				range_first,
-				&range_anchor,
-				best_number,
-				storage_key,
-				&key.0,
-			)
-			.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>())
-			.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
-			result.extend(result_range);
-		}
-
-		Ok(result)
-	}
 }

 impl HeaderMetadata for Client
@@ -1913,16 +1554,6 @@ where
 	}
 }

-impl ProvideCache for Client
-where
-	B: backend::Backend,
-	Block: BlockT,
-{
-	fn cache(&self) -> Option>> {
-		self.backend.blockchain().cache()
-	}
-}
-
 impl ProvideRuntimeApi for Client
 where
 	B: backend::Backend,
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs
index bd43d4c464ea0..563846c75d89a 100644
--- a/client/service/src/lib.rs
+++ b/client/service/src/lib.rs
@@ -68,7 +68,7 @@ use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents};
 pub use sc_consensus::ImportQueue;
 pub use sc_executor::NativeExecutionDispatch;
 #[doc(hidden)]
-pub use sc_network::config::{OnDemand, TransactionImport, TransactionImportFuture};
+pub use sc_network::config::{TransactionImport, TransactionImportFuture};
 pub use sc_rpc::Metadata as RpcMetadata;
 pub use sc_tracing::TracingReceiver;
 pub use sc_transaction_pool::Options as TransactionPoolOptions;
diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml
index 03967db15f678..7694e0f6893d6 100644
--- a/client/service/test/Cargo.toml
+++ b/client/service/test/Cargo.toml
@@ -19,7 +19,6 @@ tokio = { version = "1.10.0", features = ["time"] }
 log = "0.4.8"
 fdlimit = "0.2.1"
 parking_lot = "0.11.1"
-sc-light = { version = "4.0.0-dev", path = "../../light" }
 sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" }
diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs
index 772fdcada72ef..5278c9a13a4d7 100644
--- a/client/service/test/src/client/db.rs
+++ b/client/service/test/src/client/db.rs
@@ -21,7 +21,6 @@
 use std::sync::Arc;

 type TestBackend = sc_client_api::in_mem::Backend;
-
 #[test]
 fn test_leaves_with_complex_block_tree() {
 	let backend = Arc::new(TestBackend::new());
diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs
index 33cbefbb06a95..d5e23d319e83e 100644
--- a/client/service/test/src/client/mod.rs
+++ b/client/service/test/src/client/mod.rs
@@ -27,29 +27,25 @@ use sc_client_db::{
 use sc_consensus::{
 	BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult,
 };
-use sc_service::client::{self, new_in_mem, Client, LocalCallExecutor};
+use sc_service::client::{new_in_mem, Client, LocalCallExecutor};
 use sp_api::ProvideRuntimeApi;
 use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain};
-use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256};
+use sp_core::{testing::TaskExecutor, H256};
 use sp_runtime::{
 	generic::BlockId,
 	traits::{BlakeTwo256, Block as BlockT, Header as HeaderT},
-	ConsensusEngineId, DigestItem, Justifications,
+	ConsensusEngineId, Justifications,
 };
 use sp_state_machine::{
 	backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine,
 };
 use sp_storage::{ChildInfo, StorageKey};
 use sp_trie::{trie_types::Layout, TrieConfiguration};
-use std::{
-	collections::{HashMap, HashSet},
-	sync::Arc,
-};
+use std::{collections::HashSet, sync::Arc};
 use substrate_test_runtime::TestAPI;
 use substrate_test_runtime_client::{
 	prelude::*,
 	runtime::{
-		self,
 		genesismap::{insert_genesis_block, GenesisConfig},
 		Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer,
 	},
@@ -57,6 +53,8 @@ use substrate_test_runtime_client::{
 	Sr25519Keyring, TestClientBuilder, TestClientBuilderExt,
 };

+mod db;
+
 const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST";

 pub struct ExecutorDispatch;
@@ -77,86 +75,6 @@ fn executor() -> sc_executor::NativeElseWasmExecutor {
 	sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8)
 }

-pub fn prepare_client_with_key_changes() -> (
-	client::Client<
-		substrate_test_runtime_client::Backend,
-		substrate_test_runtime_client::ExecutorDispatch,
-		Block,
-		RuntimeApi,
-	>,
-	Vec,
-	Vec<(u64, u64, Vec, Vec<(u64, u32)>)>,
-) {
-	// prepare block structure
-	let blocks_transfers = vec![
-		vec![
-			(AccountKeyring::Alice, AccountKeyring::Dave),
-			(AccountKeyring::Bob, AccountKeyring::Dave),
-		],
-		vec![(AccountKeyring::Charlie, AccountKeyring::Eve)],
-		vec![],
-		vec![(AccountKeyring::Alice, AccountKeyring::Dave)],
-	];
-
-	// prepare client ang import blocks
-	let mut local_roots = Vec::new();
-	let config = Some(ChangesTrieConfiguration::new(4, 2));
-	let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build();
-	let mut nonces: HashMap<_, u64> = Default::default();
-	for (i, block_transfers) in blocks_transfers.into_iter().enumerate() {
-		let mut builder = remote_client.new_block(Default::default()).unwrap();
-		for (from, to) in block_transfers {
-			builder
-				.push_transfer(Transfer {
-					from: from.into(),
-					to: to.into(),
-					amount: 1,
-					nonce: *nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(),
-				})
-				.unwrap();
-		}
-		let block = builder.build().unwrap().block;
-		block_on(remote_client.import(BlockOrigin::Own, block)).unwrap();
-
-		let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap();
-		let trie_root = header
-			.digest()
-			.log(DigestItem::as_changes_trie_root)
-			.map(|root| H256::from_slice(root.as_ref()))
-			.unwrap();
-		local_roots.push(trie_root);
-	}
-
-	// prepare test cases
-	let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec();
-	let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec();
-	let charlie =
-		blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec();
-	let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec();
-	let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec();
-	let ferdie =
-		blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec();
-	let test_cases = vec![
-		(1, 4, alice.clone(), vec![(4, 0), (1, 0)]),
-		(1, 3, alice.clone(), vec![(1, 0)]),
-		(2, 4, alice.clone(), vec![(4, 0)]),
-		(2, 3, alice.clone(), vec![]),
-		(1, 4, bob.clone(), vec![(1, 1)]),
-		(1, 1, bob.clone(), vec![(1, 1)]),
-		(2, 4, bob.clone(), vec![]),
-		(1, 4, charlie.clone(), vec![(2, 0)]),
-		(1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]),
-		(1, 1, dave.clone(), vec![(1, 1), (1, 0)]),
-		(3, 4, dave.clone(), vec![(4, 0)]),
-		(1, 4, eve.clone(), vec![(2, 0)]),
-		(1, 1, eve.clone(), vec![]),
-		(3, 4, eve.clone(), vec![]),
-		(1, 4, ferdie.clone(), vec![]),
-	];
-
-	(remote_client, local_roots, test_cases)
-}
-
 fn construct_block(
 	backend: &InMemoryBackend,
 	number: BlockNumber,
@@ -184,7 +102,6 @@ fn construct_block(

 	StateMachine::new(
 		backend,
-		sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 		&mut overlay,
 		&executor(),
 		"Core_initialize_block",
@@ -199,7 +116,6 @@ fn construct_block(
 	for tx in transactions.iter() {
 		StateMachine::new(
 			backend,
-			sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 			&mut overlay,
 			&executor(),
 			"BlockBuilder_apply_extrinsic",
@@ -214,7 +130,6 @@ fn construct_block(

 	let ret_data = StateMachine::new(
 		backend,
-		sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 		&mut overlay,
 		&executor(),
 		"BlockBuilder_finalize_block",
@@ -248,7 +163,6 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec
 	StateMachine::new(
 		&backend,
-		sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 		&mut overlay,
 		&executor(),
 		"Core_execute_block",
@@ -283,7 +196,6 @@ fn construct_genesis_should_work_with_native() {
 #[test]
 fn construct_genesis_should_work_with_wasm() {
 	let mut storage = GenesisConfig::new(
-		None,
 		vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()],
 		vec![AccountKeyring::One.into(), AccountKeyring::Two.into()],
 		1000,
@@ -302,7 +214,6 @@ fn construct_genesis_should_work_with_wasm() {

 	let _ = StateMachine::new(
 		&backend,
-		sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 		&mut overlay,
 		&executor(),
 		"Core_execute_block",
@@ -318,7 +229,6 @@ fn construct_genesis_should_work_with_wasm() {

 #[test]
 fn construct_genesis_with_bad_transaction_should_panic() {
 	let mut storage = GenesisConfig::new(
-		None,
 		vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()],
 		vec![AccountKeyring::One.into(), AccountKeyring::Two.into()],
 		68,
@@ -337,7 +247,6 @@ fn construct_genesis_with_bad_transaction_should_panic() {

 	let r = StateMachine::new(
 		&backend,
-		sp_state_machine::disabled_changes_trie_state::<_, u64>(),
 		&mut overlay,
 		&executor(),
 		"Core_execute_block",
@@ -906,23 +815,6 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() {
 	);
 }

-#[test]
-fn key_changes_works() {
-	let (client, _, test_cases) = prepare_client_with_key_changes();
-
-	for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() {
-		let end = client.block_hash(end).unwrap().unwrap();
-		let actual_result =
-			client.key_changes(begin, BlockId::Hash(end), None, &StorageKey(key)).unwrap();
-		if actual_result != expected_result {
-			panic!(
-				"Failed test {}: actual = {:?}, expected = {:?}",
-				index, actual_result, expected_result,
-			);
-		}
-	}
-}
-
 #[test]
 fn import_with_justification() {
 	let mut client = substrate_test_runtime_client::new();
@@ -1229,12 +1121,8 @@ fn doesnt_import_blocks_that_revert_finality() {
 	ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap();

 	let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap();
-	let expected_err = ConsensusError::ClientImport(
-		sp_blockchain::Error::RuntimeApiError(sp_api::ApiError::Application(Box::new(
-			sp_blockchain::Error::NotInFinalizedChain,
-		)))
-		.to_string(),
-	);
+	let expected_err =
+		ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string());

 	assert_eq!(import_err.to_string(), expected_err.to_string());

@@ -1536,152 +1424,6 @@ fn returns_status_for_pruned_blocks() {
 	);
 }

-#[test]
-fn imports_blocks_with_changes_tries_config_change() {
-	// create client with initial 4^2 configuration
-	let mut client = TestClientBuilder::with_default_backend()
-		.changes_trie_config(Some(ChangesTrieConfiguration {
-			digest_interval: 4,
-			digest_levels: 2,
-		}))
-		.build();
-
-	// ===================================================================
-	// blocks 1,2,3,4,5,6,7,8,9,10 are empty
-	// block 11 changes the key
-	// block 12 is the L1 digest that covers this change
-	// blocks 13,14,15,16,17,18,19,20,21,22 are empty
-	// block 23 changes the configuration to 5^1 AND is skewed digest
-	// ===================================================================
-	// blocks 24,25 are changing the key
-	// block 26 is empty
-	// block 27 changes the key
-	// block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to
-	// `3^1`
-	// ===================================================================
-	// block 29 is empty
-	// block 30 changes the key
-	// block 31 is L1 digest that covers this change
-	// ===================================================================
-	(1..11).for_each(|number| {
-		let block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap()
-			.build()
-			.unwrap()
-			.block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(11..12).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec()))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(12..23).for_each(|number| {
-		let block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap()
-			.build()
-			.unwrap()
-			.block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(23..24).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration {
-				digest_interval: 5,
-				digest_levels: 1,
-			}))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(24..26).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec()))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(26..27).for_each(|number| {
-		let block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap()
-			.build()
-			.unwrap()
-			.block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(27..28).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec()))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(28..29).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration {
-				digest_interval: 3,
-				digest_levels: 1,
-			}))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(29..30).for_each(|number| {
-		let block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap()
-			.build()
-			.unwrap()
-			.block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(30..31).for_each(|number| {
-		let mut block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap();
-		block
-			.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec()))
-			.unwrap();
-		let block = block.build().unwrap().block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-	(31..32).for_each(|number| {
-		let block = client
-			.new_block_at(&BlockId::Number(number - 1), Default::default(), false)
-			.unwrap()
-			.build()
-			.unwrap()
-			.block;
-		block_on(client.import(BlockOrigin::Own, block)).unwrap();
-	});
-
-	// now check that configuration cache works
-	assert_eq!(
-		client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(),
-		vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)]
-	);
-}
-
 #[test]
 fn storage_keys_iter_prefix_and_start_key_works() {
 	let child_info = ChildInfo::new_default(b"child");
diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs
index 8af0ea98f8100..08fe1e34edb88 100644
--- a/client/transaction-pool/src/api.rs
+++ b/client/transaction-pool/src/api.rs
@@ -18,7 +18,7 @@

 //! Chain api required for the transaction pool.

-use codec::{Decode, Encode};
+use codec::Encode;
 use futures::{
 	channel::{mpsc, oneshot},
 	future::{ready, Future, FutureExt, Ready},
@@ -28,16 +28,12 @@ use futures::{
 use std::{marker::PhantomData, pin::Pin, sync::Arc};

 use prometheus_endpoint::Registry as PrometheusRegistry;
-use sc_client_api::{
-	blockchain::HeaderBackend,
-	light::{Fetcher, RemoteBodyRequest, RemoteCallRequest},
-	BlockBackend,
-};
+use sc_client_api::{blockchain::HeaderBackend, BlockBackend};
 use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_core::traits::SpawnEssentialNamed;
 use sp_runtime::{
 	generic::BlockId,
-	traits::{self, Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT},
+	traits::{self, Block as BlockT, BlockIdTo},
 	transaction_validity::{TransactionSource, TransactionValidity},
 };
 use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
@@ -290,127 +286,3 @@ where
 		validate_transaction_blocking(&*self.client, at, source, uxt)
 	}
 }
-
-/// The transaction pool logic for light client.
-pub struct LightChainApi {
-	client: Arc,
-	fetcher: Arc,
-	_phantom: PhantomData,
-}
-
-impl LightChainApi {
-	/// Create new transaction pool logic.
-	pub fn new(client: Arc, fetcher: Arc) -> Self {
-		LightChainApi { client, fetcher, _phantom: Default::default() }
-	}
-}
-
-impl graph::ChainApi for LightChainApi
-where
-	Block: BlockT,
-	Client: HeaderBackend + 'static,
-	F: Fetcher + 'static,
-{
-	type Block = Block;
-	type Error = error::Error;
-	type ValidationFuture =
-		Box> + Send + Unpin>;
-	type BodyFuture = Pin<
-		Box<
-			dyn Future::Extrinsic>>>>
-				+ Send,
-		>,
-	>;
-
-	fn validate_transaction(
-		&self,
-		at: &BlockId,
-		source: TransactionSource,
-		uxt: graph::ExtrinsicFor,
-	) -> Self::ValidationFuture {
-		let header_hash = self.client.expect_block_hash_from_id(at);
-		let header_and_hash = header_hash.and_then(|header_hash| {
-			self.client
-				.expect_header(BlockId::Hash(header_hash))
-				.map(|header| (header_hash, header))
-		});
-		let (block, header) = match header_and_hash {
-			Ok((header_hash, header)) => (header_hash, header),
-			Err(err) => return Box::new(ready(Err(err.into()))),
-		};
-		let remote_validation_request = self.fetcher.remote_call(RemoteCallRequest {
-			block,
-			header,
-			method: "TaggedTransactionQueue_validate_transaction".into(),
-			call_data: (source, uxt, block).encode(),
-			retry_count: None,
-		});
-		let remote_validation_request = remote_validation_request.then(move |result| {
-			let result: error::Result =
-				result.map_err(Into::into).and_then(|result| {
-					Decode::decode(&mut &result[..]).map_err(|e| {
-						Error::RuntimeApi(format!("Error decoding tx validation result: {:?}", e))
-					})
-				});
-			ready(result)
-		});
-
-		Box::new(remote_validation_request)
-	}
-
-	fn block_id_to_number(
-		&self,
-		at: &BlockId,
-	) -> error::Result>> {
-		Ok(self.client.block_number_from_id(at)?)
-	}
-
-	fn block_id_to_hash(
-		&self,
-		at: &BlockId,
-	) -> error::Result>> {
-		Ok(self.client.block_hash_from_id(at)?)
-	}
-
-	fn hash_and_length(
-		&self,
-		ex: &graph::ExtrinsicFor,
-	) -> (graph::ExtrinsicHash, usize) {
-		ex.using_encoded(|x| (<::Hashing as HashT>::hash(x), x.len()))
-	}
-
-	fn block_body(&self, id: &BlockId) -> Self::BodyFuture {
-		let header = self
-			.client
-			.header(*id)
-			.and_then(|h| h.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id))));
-		let header = match header {
-			Ok(header) => header,
-			Err(err) => {
-				log::warn!(target: "txpool", "Failed to query header: {:?}", err);
-				return Box::pin(ready(Ok(None)))
-			},
-		};
-
-		let fetcher = self.fetcher.clone();
-		async move {
-			let transactions = fetcher
-				.remote_body(RemoteBodyRequest { header, retry_count: None })
-				.await
-				.unwrap_or_else(|e| {
-					log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e);
-					Vec::new()
-				});
-
-			Ok(Some(transactions))
-		}
-		.boxed()
-	}
-
-	fn block_header(
-		&self,
-		at: &BlockId,
-	) -> Result::Header>, Self::Error> {
-		self.client.header(*at).map_err(Into::into)
-	}
-}
diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs
index 3565cb52ad87b..85af4f89d72b5 100644
--- a/client/transaction-pool/src/lib.rs
+++ b/client/transaction-pool/src/lib.rs
@@ -38,7 +38,7 @@ pub mod test_helpers {
 	};
 }

-pub use crate::api::{FullChainApi, LightChainApi};
+pub use crate::api::FullChainApi;
 use futures::{
 	channel::oneshot,
 	future::{self, ready},
@@ -79,9 +79,6 @@ type PolledIterator = Pin = BasicPool, Block>;
-/// A transaction pool for a light node.
-pub type LightPool =
-	BasicPool, Block>;

 /// Basic implementation of transaction pool that can be customized by providing PoolApi.
 pub struct BasicPool
@@ -364,33 +361,6 @@ where
 	}
 }

-impl LightPool
-where
-	Block: BlockT,
-	Client: sp_blockchain::HeaderBackend + sc_client_api::UsageProvider + 'static,
-	Fetcher: sc_client_api::Fetcher + 'static,
-{
-	/// Create new basic transaction pool for a light node with the provided api.
-	pub fn new_light(
-		options: graph::Options,
-		prometheus: Option<&PrometheusRegistry>,
-		spawner: impl SpawnEssentialNamed,
-		client: Arc,
-		fetcher: Arc,
-	) -> Self {
-		let pool_api = Arc::new(LightChainApi::new(client.clone(), fetcher));
-		Self::with_revalidation_type(
-			options,
-			false.into(),
-			pool_api,
-			prometheus,
-			RevalidationType::Light,
-			spawner,
-			client.usage_info().chain.best_number,
-		)
-	}
-}
-
 impl FullPool
 where
 	Block: BlockT,
diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs
index a4e55f25df5f6..887bb359ed3db 100644
--- a/frame/aura/src/lib.rs
+++ b/frame/aura/src/lib.rs
@@ -151,7 +151,7 @@ impl Pallet {
 	fn change_authorities(new: WeakBoundedVec) {
 		>::put(&new);

-		let log: DigestItem = DigestItem::Consensus(
+		let log = DigestItem::Consensus(
 			AURA_ENGINE_ID,
 			ConsensusLog::AuthoritiesChange(new.into_inner()).encode(),
 		);
@@ -222,7 +222,7 @@ impl OneSessionHandler for Pallet {
 	}

 	fn on_disabled(i: u32) {
-		let log: DigestItem = DigestItem::Consensus(
+		let log = DigestItem::Consensus(
 			AURA_ENGINE_ID,
 			ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(),
 		);
diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs
index 033d993f4e26d..569722ca38ced 100644
--- a/frame/babe/src/lib.rs
+++ b/frame/babe/src/lib.rs
@@ -633,7 +633,7 @@ impl Pallet {
 	}

 	fn deposit_consensus(new: U) {
-		let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode());
+		let log = DigestItem::Consensus(BABE_ENGINE_ID, new.encode());
 		>::deposit_log(log.into())
 	}
diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs
index 7c70766623b4d..f27bc450ad146 100644
--- a/frame/beefy-mmr/src/tests.rs
+++ b/frame/beefy-mmr/src/tests.rs
@@ -40,7 +40,7 @@ fn init_block(block: u64) {
 	BeefyMmr::on_initialize(block);
 }

-pub fn beefy_log(log: ConsensusLog) -> DigestItem {
+pub fn beefy_log(log: ConsensusLog) -> DigestItem {
 	DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode())
 }
diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs
index 3b28d454849cf..6a7118c1f5c96 100644
--- a/frame/beefy/src/lib.rs
+++ b/frame/beefy/src/lib.rs
@@ -110,7 +110,7 @@ impl Pallet {
 		let next_id = Self::validator_set_id() + 1u64;
 		>::put(next_id);

-		let log: DigestItem = DigestItem::Consensus(
+		let log = DigestItem::Consensus(
 			BEEFY_ENGINE_ID,
 			ConsensusLog::AuthoritiesChange(ValidatorSet { validators: new, id: next_id })
 				.encode(),
 		);
@@ -163,7 +163,7 @@ impl OneSessionHandler for Pallet {
 	}

 	fn on_disabled(i: u32) {
-		let log: DigestItem = DigestItem::Consensus(
+		let log = DigestItem::Consensus(
 			BEEFY_ENGINE_ID,
 			ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(),
 		);
diff --git a/frame/beefy/src/tests.rs b/frame/beefy/src/tests.rs
index 24f9acaf76bfc..252c03efb54a9 100644
--- a/frame/beefy/src/tests.rs
+++ b/frame/beefy/src/tests.rs
@@ -20,7 +20,6 @@ use std::vec;

 use beefy_primitives::ValidatorSet;
 use codec::Encode;
-use sp_core::H256;
 use sp_runtime::DigestItem;

 use frame_support::traits::OnInitialize;
@@ -32,7 +31,7 @@ fn init_block(block: u64) {
 	Session::on_initialize(block);
 }

-pub fn beefy_log(log: ConsensusLog) -> DigestItem {
+pub fn beefy_log(log: ConsensusLog) -> DigestItem {
 	DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode())
 }
diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs
index b1bdf357ec07d..e77c811a35e2d 100644
--- a/frame/executive/src/lib.rs
+++ b/frame/executive/src/lib.rs
@@ -125,7 +125,6 @@ use frame_support::{
 	},
 	weights::{DispatchClass, DispatchInfo, GetDispatchInfo},
 };
-use frame_system::DigestOf;
 use sp_runtime::{
 	generic::Digest,
 	traits::{
@@ -281,8 +280,8 @@ where
 		Self::initialize_block_impl(header.number(), header.parent_hash(), &digests);
 	}

-	fn extract_pre_digest(header: &System::Header) -> DigestOf {
-		let mut digest = >::default();
+	fn extract_pre_digest(header: &System::Header) -> Digest {
+		let mut digest = ::default();
 		header.digest().logs().iter().for_each(|d| {
 			if d.as_pre_runtime().is_some() {
 				digest.push(d.clone())
@@ -294,7 +293,7 @@
 	fn initialize_block_impl(
 		block_number: &System::BlockNumber,
 		parent_hash: &System::Hash,
-		digest: &Digest,
+		digest: &Digest,
 	) {
 		let mut weight = 0;
 		if Self::runtime_upgraded() {
diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs
index 9f6967a7d3c85..0e7d885649cc3 100644
--- a/frame/grandpa/src/lib.rs
+++ b/frame/grandpa/src/lib.rs
@@ -508,7 +508,7 @@ impl Pallet {

 	/// Deposit one of this module's logs.
 	fn deposit_log(log: ConsensusLog) {
-		let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode());
+		let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode());
 		>::deposit_log(log.into());
 	}
diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs
index f1996553f02eb..49e4022a4aaed 100644
--- a/frame/grandpa/src/mock.rs
+++ b/frame/grandpa/src/mock.rs
@@ -254,7 +254,7 @@ impl Config for Test {
 	type MaxAuthorities = MaxAuthorities;
 }

-pub fn grandpa_log(log: ConsensusLog) -> DigestItem {
+pub fn grandpa_log(log: ConsensusLog) -> DigestItem {
 	DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode())
 }
diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs
index 69445932b869e..81f98f2c23d48 100644
--- a/frame/support/src/storage/mod.rs
+++ b/frame/support/src/storage/mod.rs
@@ -1223,7 +1223,7 @@ mod private {
 	pub trait Sealed {}

 	impl Sealed for Vec {}
-	impl Sealed for Digest {}
+	impl Sealed for Digest {}
 	impl Sealed for BoundedVec {}
 	impl Sealed for WeakBoundedVec {}
 	impl Sealed for bounded_btree_map::BoundedBTreeMap {}
@@ -1263,7 +1263,7 @@ impl StorageDecodeLength for Vec {}
 /// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the
 /// internal vec and we can append to this vec. We have a test that ensures that if the `Digest`
 /// format ever changes, we need to remove this here.
-impl StorageAppend> for Digest {}
+impl StorageAppend for Digest {}

 /// Marker trait that is implemented for types that support the `storage::append` api with a limit
 /// on the number of element.
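The `StorageAppend` hunk above leans on a SCALE property worth spelling out: SCALE adds no marker bytes for a struct, so a single-field wrapper such as `Digest { logs }` encodes to exactly the bytes of its inner vec, and `storage::append` can therefore keep extending the stored value in place. A minimal sketch of that property, assuming the `parity-scale-codec` crate (imported as `codec`, as elsewhere in this tree) with its `derive` feature enabled:

    use codec::Encode;

    // Stand-in for the now non-generic `Digest`: a single-field struct,
    // which SCALE encodes with no tag or marker of its own.
    #[derive(Encode)]
    struct Wrapper {
        logs: Vec<u8>,
    }

    fn main() {
        let wrapper = Wrapper { logs: vec![1, 2, 3] };
        // Identical bytes: a compact length prefix followed by the elements,
        // which is exactly what makes appending to the stored value sound.
        assert_eq!(wrapper.encode(), vec![1u8, 2, 3].encode());
    }

The test hunk just below exercises the same invariant against the real `Digest` type.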
@@ -1484,8 +1484,8 @@ mod test {
 	fn digest_storage_append_works_as_expected() {
 		TestExternalities::default().execute_with(|| {
 			struct Storage;
-			impl generator::StorageValue> for Storage {
-				type Query = Digest;
+			impl generator::StorageValue for Storage {
+				type Query = Digest;

 				fn module_prefix() -> &'static [u8] {
 					b"MyModule"
@@ -1495,23 +1495,20 @@
 					b"Storage"
 				}

-				fn from_optional_value_to_query(v: Option>) -> Self::Query {
+				fn from_optional_value_to_query(v: Option) -> Self::Query {
 					v.unwrap()
 				}

-				fn from_query_to_optional_value(v: Self::Query) -> Option> {
+				fn from_query_to_optional_value(v: Self::Query) -> Option {
 					Some(v)
 				}
 			}

-			Storage::append(DigestItem::ChangesTrieRoot(1));
 			Storage::append(DigestItem::Other(Vec::new()));

 			let value = unhashed::get_raw(&Storage::storage_value_final_key()).unwrap();
-			let expected = Digest {
-				logs: vec![DigestItem::ChangesTrieRoot(1), DigestItem::Other(Vec::new())],
-			};
+			let expected = Digest { logs: vec![DigestItem::Other(Vec::new())] };

 			assert_eq!(Digest::decode(&mut &value[..]).unwrap(), expected);
 		});
 	}
diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs
index e7371b1099e5e..eddf78ce6b254 100644
--- a/frame/system/benchmarking/src/lib.rs
+++ b/frame/system/benchmarking/src/lib.rs
@@ -22,8 +22,8 @@
 use codec::Encode;
 use frame_benchmarking::{benchmarks, whitelisted_caller};
 use frame_support::{storage, traits::Get, weights::DispatchClass};
-use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin};
-use sp_core::{storage::well_known_keys, ChangesTrieConfiguration};
+use frame_system::{Call, Pallet as System, RawOrigin};
+use sp_core::storage::well_known_keys;
 use sp_runtime::traits::Hash;
 use sp_std::{prelude::*, vec};

@@ -62,23 +62,6 @@ benchmarks! {
 		assert_eq!(current_code.len(), 4_000_000 as usize);
 	}

-	set_changes_trie_config {
-		let d = 1000;
-
-		let digest_item = DigestItemOf::::Other(vec![]);
-
-		for i in 0 .. d {
-			System::::deposit_log(digest_item.clone());
-		}
-		let changes_trie_config = ChangesTrieConfiguration {
-			digest_interval: d,
-			digest_levels: d,
-		};
-	}: _(RawOrigin::Root, Some(changes_trie_config))
-	verify {
-		assert_eq!(System::::digest().logs.len(), (d + 1) as usize)
-	}
-
 	#[skip_meta]
 	set_storage {
 		let i in 1 .. 1000;
diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs
index 41e1738c034f1..d5b930fa165e6 100644
--- a/frame/system/src/lib.rs
+++ b/frame/system/src/lib.rs
@@ -95,7 +95,7 @@ use frame_support::{
 	Parameter,
 };
 use scale_info::TypeInfo;
-use sp_core::{storage::well_known_keys, ChangesTrieConfiguration};
+use sp_core::storage::well_known_keys;

 #[cfg(feature = "std")]
 use frame_support::traits::GenesisBuild;
@@ -405,37 +405,6 @@ pub mod pallet {
 			Ok(().into())
 		}

-		/// Set the new changes trie configuration.
-		///
-		/// #
-		/// - `O(1)`
-		/// - 1 storage write or delete (codec `O(1)`).
-		/// - 1 call to `deposit_log`: Uses `append` API, so O(1)
-		/// - Base Weight: 7.218 µs
-		/// - DB Weight:
-		///     - Writes: Changes Trie, System Digest
-		/// #
-		#[pallet::weight((T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational))]
-		pub fn set_changes_trie_config(
-			origin: OriginFor,
-			changes_trie_config: Option,
-		) -> DispatchResultWithPostInfo {
-			ensure_root(origin)?;
-			match changes_trie_config.clone() {
-				Some(changes_trie_config) => storage::unhashed::put_raw(
-					well_known_keys::CHANGES_TRIE_CONFIG,
-					&changes_trie_config.encode(),
-				),
-				None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG),
-			}
-
-			let log = generic::DigestItem::ChangesTrieSignal(
-				generic::ChangesTrieSignal::NewConfiguration(changes_trie_config),
-			);
-			Self::deposit_log(log.into());
-			Ok(().into())
-		}
-
 		/// Set some items of storage.
 		///
 		/// #
@@ -617,7 +586,7 @@ pub mod pallet {
 	/// Digest of the current block, also part of the block header.
 	#[pallet::storage]
 	#[pallet::getter(fn digest)]
-	pub(super) type Digest = StorageValue<_, DigestOf, ValueQuery>;
+	pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>;

 	/// Events deposited for the current block.
 	///
@@ -666,7 +635,6 @@ pub mod pallet {

 	#[pallet::genesis_config]
 	pub struct GenesisConfig {
-		pub changes_trie_config: Option,
 		#[serde(with = "sp_core::bytes")]
 		pub code: Vec,
 	}

 	#[cfg(feature = "std")]
 	impl Default for GenesisConfig {
 		fn default() -> Self {
-			Self { changes_trie_config: Default::default(), code: Default::default() }
+			Self { code: Default::default() }
 		}
 	}
@@ -689,12 +657,6 @@ pub mod pallet {

 			sp_io::storage::set(well_known_keys::CODE, &self.code);
 			sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode());
-
-			if let Some(ref changes_trie_config) = self.changes_trie_config {
-				sp_io::storage::set(
-					well_known_keys::CHANGES_TRIE_CONFIG,
-					&changes_trie_config.encode(),
-				);
-			}
 		}
 	}
 }
@@ -759,9 +721,6 @@ impl GenesisConfig {
 	}
 }

-pub type DigestOf = generic::Digest<::Hash>;
-pub type DigestItemOf = generic::DigestItem<::Hash>;
-
 pub type Key = Vec;
 pub type KeyValue = (Vec, Vec);
@@ -1369,7 +1328,7 @@ impl Pallet {
 	pub fn initialize(
 		number: &T::BlockNumber,
 		parent_hash: &T::Hash,
-		digest: &DigestOf,
+		digest: &generic::Digest,
 		kind: InitKind,
 	) {
 		// populate environment
@@ -1409,7 +1368,7 @@ impl Pallet {
 		// stay to be inspected by the client and will be cleared by `Self::initialize`.
 		let number = >::get();
 		let parent_hash = >::get();
-		let mut digest = >::get();
+		let digest = >::get();

 		let extrinsics = (0..ExtrinsicCount::::take().unwrap_or_default())
 			.map(ExtrinsicData::::take)
@@ -1427,17 +1386,6 @@ impl Pallet {
 		let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..])
 			.expect("Node is configured to use the same hash; qed");
-		let storage_changes_root = sp_io::storage::changes_root(&parent_hash.encode());
-
-		// we can't compute changes trie root earlier && put it to the Digest
-		// because it will include all currently existing temporaries.
- if let Some(storage_changes_root) = storage_changes_root { - let item = generic::DigestItem::ChangesTrieRoot( - T::Hash::decode(&mut &storage_changes_root[..]) - .expect("Node is configured to use the same hash; qed"), - ); - digest.push(item); - } ::new( number, @@ -1454,7 +1402,7 @@ impl Pallet { /// - `O(1)` /// - 1 storage write (codec `O(1)`) /// # - pub fn deposit_log(item: DigestItemOf) { + pub fn deposit_log(item: generic::DigestItem) { >::append(item); } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index bc0f027e1efaa..dd2a7f6c14909 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -288,10 +288,6 @@ fn generate_runtime_api_base_structures() -> Result { fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_state: Option<&#crate_::ChangesTrieState< - #crate_::HashFor, - #crate_::NumberFor, - >>, parent_hash: Block::Hash, ) -> std::result::Result< #crate_::StorageChanges, @@ -299,7 +295,6 @@ fn generate_runtime_api_base_structures() -> Result { > where Self: Sized { self.changes.replace(Default::default()).into_storage_changes( backend, - changes_trie_state, parent_hash, self.storage_transaction_cache.replace(Default::default()), ) diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 77f8a07f85c48..9483d018c4a40 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -116,10 +116,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result, - #crate_::NumberFor<#block_type>, - >>, _: <#block_type as #crate_::BlockT>::Hash, ) -> std::result::Result< #crate_::StorageChanges, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index cb74f95d21b09..7f64e191941f7 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -97,7 +97,7 @@ pub use sp_runtime::{ #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - Backend as StateBackend, ChangesTrieState, InMemoryBackend, OverlayedChanges, StorageProof, + Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, }; #[cfg(feature = "std")] use sp_std::result; @@ -394,14 +394,12 @@ pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash> pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< >>::Transaction, HashFor, - NumberFor, >; #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges< >>::Transaction, HashFor, - NumberFor, >; /// Extract the state backend type for a type that implements `ProvideRuntimeApi`. 
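[Editor's note] As background for the `deposit_log` hunk above, which keeps using the `append` API: appending is O(1) because of how SCALE encodes sequences. A standalone sketch (assumes the `parity-scale-codec` crate, imported as `codec` as elsewhere in this tree):

use codec::{Compact, Encode};

fn main() {
    // A `Vec<T>` is SCALE-encoded as a compact length prefix followed by
    // the concatenated item encodings, so an `append` only bumps the
    // prefix and writes the new item's bytes at the end.
    let one = vec![42u32].encode();
    let two = vec![42u32, 7u32].encode();
    // The bytes of the shared first item are identical...
    assert_eq!(&one[1..], &two[1..one.len()]);
    // ...only the compact length prefix differs.
    assert_eq!(one[0], Compact(1u32).encode()[0]);
    assert_eq!(two[0], Compact(2u32).encode()[0]);
}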
@@ -514,7 +512,6 @@ pub trait ApiExt { fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, ) -> Result, String> where diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 101f92fd6c7d7..e32290b12a599 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -211,7 +211,7 @@ fn record_proof_works() { None, 8, ); - execution_proof_check_on_trie_backend::<_, u64, _, _>( + execution_proof_check_on_trie_backend( &backend, &mut overlay, &executor, diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index fc70ce845dc98..71c3f36a161b6 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -17,8 +17,6 @@ //! Substrate blockchain trait -use std::sync::Arc; - use log::warn; use parking_lot::RwLock; use sp_runtime::{ @@ -96,8 +94,6 @@ pub trait Backend: fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; /// Returns hashes of all blocks that are leaves of the block tree. /// in other words, that have no children, are chain heads. @@ -237,33 +233,6 @@ pub trait Backend: fn block_indexed_body(&self, id: BlockId) -> Result>>>; } -/// Provides access to the optional cache. -pub trait ProvideCache { - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; -} - -/// Blockchain optional data cache. -pub trait Cache: Send + Sync { - /// Initialize genesis value for the given cache. - /// - /// The operation should be performed once before anything else is inserted in the cache. - /// Otherwise cache may end up in inconsistent state. - fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; - /// Returns cached value by the given key. - /// - /// Returned tuple is the range where value has been active and the value itself. - /// Fails if read from cache storage fails or if the value for block is discarded - /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). - fn get_at( - &self, - key: &well_known_cache_keys::Id, - block: &BlockId, - ) -> Result< - Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, - >; -} - /// Blockchain info #[derive(Debug, Eq, PartialEq)] pub struct Info { diff --git a/primitives/consensus/aura/src/digests.rs b/primitives/consensus/aura/src/digests.rs index eaa29036d98a1..6925862d0ce94 100644 --- a/primitives/consensus/aura/src/digests.rs +++ b/primitives/consensus/aura/src/digests.rs @@ -25,7 +25,6 @@ use crate::AURA_ENGINE_ID; use codec::{Codec, Encode}; use sp_consensus_slots::Slot; use sp_runtime::generic::DigestItem; -use sp_std::fmt::Debug; /// A digest item which is usable with aura consensus. 
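[Editor's note] Since `DigestItem` lost its `Hash` parameter, the aura `CompatibleDigestItem` impl below no longer needs any bound on a hash type. A standalone sketch of what the seal helper boils down to (the local `AURA_ENGINE_ID` is a copy for illustration; the real constant lives in `sp-consensus-aura`):

use codec::Encode;
use sp_runtime::generic::DigestItem;

// Local copy for the sketch; see `sp_consensus_aura::AURA_ENGINE_ID`.
const AURA_ENGINE_ID: [u8; 4] = *b"aura";

fn main() {
    // A seal is just the engine id plus the SCALE-encoded signature bytes.
    let signature: Vec<u8> = vec![1, 2, 3];
    let item = DigestItem::Seal(AURA_ENGINE_ID, signature.encode());
    assert!(matches!(item, DigestItem::Seal(id, _) if id == AURA_ENGINE_ID));
}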
pub trait CompatibleDigestItem: Sized { @@ -42,10 +41,9 @@ pub trait CompatibleDigestItem: Sized { fn as_aura_pre_digest(&self) -> Option; } -impl CompatibleDigestItem for DigestItem +impl CompatibleDigestItem for DigestItem where Signature: Codec, - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { fn aura_seal(signature: Signature) -> Self { DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 1c908fe61fc0b..4847adec37f18 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -21,7 +21,7 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; -use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; @@ -177,10 +177,7 @@ pub trait CompatibleDigestItem: Sized { fn as_next_config_descriptor(&self) -> Option; } -impl CompatibleDigestItem for DigestItem -where - Hash: Send + Sync + Eq + Clone + Codec + 'static, -{ +impl CompatibleDigestItem for DigestItem { fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index d7979baf47c11..ce834fd0a47f4 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -26,7 +26,8 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, DigestFor, HashFor, NumberFor}, + traits::{Block as BlockT, HashFor}, + Digest, }; use sp_state_machine::StorageProof; @@ -111,8 +112,7 @@ pub struct Proposal { /// Proof that was recorded while building the block. pub proof: Proof, /// The storage changes while building this block. - pub storage_changes: - sp_state_machine::StorageChanges, NumberFor>, + pub storage_changes: sp_state_machine::StorageChanges>, } /// Error that is returned when [`ProofRecording`] requested to record a proof, @@ -224,7 +224,7 @@ pub trait Proposer { fn propose( self, inherent_data: InherentData, - inherent_digests: DigestFor, + inherent_digests: Digest, max_duration: Duration, block_size_limit: Option, ) -> Self::Proposal; diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs deleted file mode 100644 index f4ce83dc2c877..0000000000000 --- a/primitives/core/src/changes_trie.rs +++ /dev/null @@ -1,321 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Substrate changes trie configuration. 
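[Editor's note] The babe impl above follows the same pattern as aura's. A round-trip sketch of how a pre-runtime digest is stored and fished back out by engine id (the local `BABE_ENGINE_ID` is a copy for illustration; the real constant lives in `sp-consensus-babe`):

use sp_runtime::generic::DigestItem;

// Local copy for the sketch; see `sp_consensus_babe::BABE_ENGINE_ID`.
const BABE_ENGINE_ID: [u8; 4] = *b"BABE";

fn main() {
    let item = DigestItem::PreRuntime(BABE_ENGINE_ID, vec![9, 9]);
    let (id, data) = item.as_pre_runtime().expect("is a pre-runtime digest");
    assert_eq!(id, BABE_ENGINE_ID);
    assert_eq!(data, &[9u8, 9][..]);
}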
- -use codec::{Decode, Encode}; -use num_traits::Zero; -#[cfg(any(feature = "std", test))] -use serde::{Deserialize, Serialize}; - -/// Substrate changes trie configuration. -#[cfg_attr( - any(feature = "std", test), - derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) -)] -#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode, scale_info::TypeInfo)] -pub struct ChangesTrieConfiguration { - /// Interval (in blocks) at which level1-digests are created. Digests are not - /// created when this is less or equal to 1. - pub digest_interval: u32, - /// Maximal number of digest levels in hierarchy. 0 means that digests are not - /// created at all (even level1 digests). 1 means only level1-digests are created. - /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. - /// Please ensure that maximum digest interval (i.e. digest_interval^digest_levels) - /// is within `u32` limits. Otherwise you'll never see digests covering such intervals - /// && maximal digests interval will be truncated to the last interval that fits - /// `u32` limits. - pub digest_levels: u32, -} - -/// Substrate changes trie configuration range. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ChangesTrieConfigurationRange { - /// Zero block of configuration. - pub zero: (Number, Hash), - /// Last block of configuration (if configuration has been deactivated at some point). - pub end: Option<(Number, Hash)>, - /// The configuration itself. None if changes tries were disabled in this range. - pub config: Option, -} - -impl ChangesTrieConfiguration { - /// Create new configuration given digest interval and levels. - pub fn new(digest_interval: u32, digest_levels: u32) -> Self { - Self { digest_interval, digest_levels } - } - - /// Is digest build enabled? - pub fn is_digest_build_enabled(&self) -> bool { - self.digest_interval > 1 && self.digest_levels > 0 - } - - /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block(&self, zero: Number, block: Number) -> bool - where - Number: From - + PartialEq - + ::sp_std::ops::Rem - + ::sp_std::ops::Sub - + ::sp_std::cmp::PartialOrd - + Zero, - { - block > zero && - self.is_digest_build_enabled() && - ((block - zero) % self.digest_interval.into()).is_zero() - } - - /// Returns max digest interval. One if digests are not created at all. - pub fn max_digest_interval(&self) -> u32 { - if !self.is_digest_build_enabled() { - return 1 - } - - // we'll get >1 loop iteration only when bad configuration parameters are selected - let mut current_level = self.digest_levels; - loop { - if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { - return max_digest_interval - } - - current_level -= 1; - } - } - - /// Returns max level digest block number that has been created at block <= passed block number. - /// - /// Returns None if digests are not created at all. 
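[Editor's note] The deleted `max_digest_interval` above is careful to degrade on configurations whose `interval^levels` overflows `u32`. A dependency-free re-implementation sketch of that loop (the free function is illustrative only; the asserted values mirror the removed `max_digest_interval_works` test):

fn max_digest_interval(digest_interval: u32, digest_levels: u32) -> u32 {
    // Digest building is disabled entirely for interval <= 1 or zero levels.
    if digest_interval <= 1 || digest_levels == 0 {
        return 1;
    }
    // Walk down from the configured level until interval^level fits in u32.
    let mut current_level = digest_levels;
    loop {
        if let Some(max) = digest_interval.checked_pow(current_level) {
            return max;
        }
        current_level -= 1;
    }
}

fn main() {
    assert_eq!(max_digest_interval(0, 0), 1);
    assert_eq!(max_digest_interval(2, 2), 4);
    assert_eq!(max_digest_interval(8, 4), 4096);
    assert_eq!(max_digest_interval(u32::MAX, 1024), u32::MAX);
}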
- pub fn prev_max_level_digest_block(&self, zero: Number, block: Number) -> Option - where - Number: Clone - + From - + PartialOrd - + PartialEq - + ::sp_std::ops::Add - + ::sp_std::ops::Sub - + ::sp_std::ops::Div - + ::sp_std::ops::Mul - + Zero, - { - if block <= zero { - return None - } - - let (next_begin, next_end) = - self.next_max_level_digest_range(zero.clone(), block.clone())?; - - // if 'next' digest includes our block, then it is a also a previous digest - if next_end == block { - return Some(block) - } - - // if previous digest ends at zero block, then there are no previous digest - let prev_end = next_begin - 1.into(); - if prev_end == zero { - None - } else { - Some(prev_end) - } - } - - /// Returns max level digest blocks range (inclusive) which includes passed block. - /// - /// Returns None if digests are not created at all. - /// It will return the first max-level digest if block is <= zero. - pub fn next_max_level_digest_range( - &self, - zero: Number, - mut block: Number, - ) -> Option<(Number, Number)> - where - Number: Clone - + From - + PartialOrd - + PartialEq - + ::sp_std::ops::Add - + ::sp_std::ops::Sub - + ::sp_std::ops::Div - + ::sp_std::ops::Mul, - { - if !self.is_digest_build_enabled() { - return None - } - - if block <= zero { - block = zero.clone() + 1.into(); - } - - let max_digest_interval: Number = self.max_digest_interval().into(); - let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); - if max_digests_since_zero == 0.into() { - return Some((zero.clone() + 1.into(), zero + max_digest_interval)) - } - let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); - Some(if block == last_max_digest_block { - (block.clone() - max_digest_interval + 1.into(), block) - } else { - (last_max_digest_block.clone() + 1.into(), last_max_digest_block + max_digest_interval) - }) - } - - /// Returns Some if digest must be built at given block number. 
- /// The tuple is: - /// ( - /// digest level - /// digest interval (in blocks) - /// step between blocks we're interested in when digest is built - /// ) - pub fn digest_level_at_block( - &self, - zero: Number, - block: Number, - ) -> Option<(u32, u32, u32)> - where - Number: Clone - + From - + PartialEq - + ::sp_std::ops::Rem - + ::sp_std::ops::Sub - + ::sp_std::cmp::PartialOrd - + Zero, - { - if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { - return None - } - - let relative_block = block - zero; - let mut digest_interval = self.digest_interval; - let mut current_level = 1u32; - let mut digest_step = 1u32; - while current_level < self.digest_levels { - let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) - if (relative_block.clone() % new_digest_interval.into()).is_zero() => - new_digest_interval, - _ => break, - }; - - digest_step = digest_interval; - digest_interval = new_digest_interval; - current_level += 1; - } - - Some((current_level, digest_interval, digest_step)) - } -} - -#[cfg(test)] -mod tests { - use super::ChangesTrieConfiguration; - - fn config(interval: u32, levels: u32) -> ChangesTrieConfiguration { - ChangesTrieConfiguration { digest_interval: interval, digest_levels: levels } - } - - #[test] - fn is_digest_build_enabled_works() { - assert!(!config(0, 100).is_digest_build_enabled()); - assert!(!config(1, 100).is_digest_build_enabled()); - assert!(config(2, 100).is_digest_build_enabled()); - assert!(!config(100, 0).is_digest_build_enabled()); - assert!(config(100, 1).is_digest_build_enabled()); - } - - #[test] - fn is_digest_build_required_at_block_works() { - fn test_with_zero(zero: u64) { - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 8u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 9u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 512u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4096u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4103u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4104u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4108u64)); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn digest_level_at_block_works() { - fn test_with_zero(zero: u64) { - assert_eq!(config(8, 4).digest_level_at_block(zero, zero), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); - assert_eq!( - config(8, 4).digest_level_at_block(zero, zero + 4096u64), - Some((4, 4096, 512)) - ); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 
4112u64), Some((1, 8, 1))); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn max_digest_interval_works() { - assert_eq!(config(0, 0).max_digest_interval(), 1); - assert_eq!(config(2, 2).max_digest_interval(), 4); - assert_eq!(config(8, 4).max_digest_interval(), 4096); - assert_eq!(config(::std::u32::MAX, 1024).max_digest_interval(), ::std::u32::MAX); - } - - #[test] - fn next_max_level_digest_range_works() { - assert_eq!(config(0, 0).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(1, 1).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(2, 1).next_max_level_digest_range(0u64, 16), Some((15, 16))); - assert_eq!(config(4, 1).next_max_level_digest_range(0u64, 16), Some((13, 16))); - assert_eq!(config(32, 1).next_max_level_digest_range(0u64, 16), Some((1, 32))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 10), Some((9, 16))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 8), Some((1, 8))); - assert_eq!(config(2, 1).next_max_level_digest_range(1u64, 1), Some((2, 3))); - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 9), Some((8, 11))); - - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 5), Some((8, 11))); - } - - #[test] - fn prev_max_level_digest_block_works() { - assert_eq!(config(0, 0).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 17), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 33), Some(32)); - assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 8), None); - - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 5), None); - } -} diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index fd752397cd9a9..3983f7cc155d9 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -58,7 +58,6 @@ pub mod hexdisplay; pub mod u32_trait; -mod changes_trie; pub mod ecdsa; pub mod ed25519; pub mod hash; @@ -76,7 +75,6 @@ pub use self::{ hash::{convert_hash, H160, H256, H512}, uint::{U256, U512}, }; -pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index e6a8f8caa8d33..aac45234deadd 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -173,13 +173,6 @@ pub trait Externalities: ExtensionStore { /// operation. fn storage_append(&mut self, key: Vec, value: Vec); - /// Get the changes trie root of the current storage overlay at a block with given `parent`. - /// - /// `parent` expects a SCALE encoded hash. - /// - /// The returned hash is defined by the `Block` and is SCALE encoded. - fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; - /// Start a new nested transaction. 
/// /// This allows to either commit or roll back all changes made after this call to the diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 78e6f0c847952..e4f52fd4e0e21 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -195,16 +195,9 @@ pub trait Storage { self.storage_root() } - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns `Some(Vec)` which holds the SCALE encoded hash or `None` when - /// changes trie is disabled. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") + /// Always returns `None`. This function exists for compatibility reasons. + fn changes_root(&mut self, _parent_hash: &[u8]) -> Option> { + None } /// Get the next key in storage after the given one in lexicographic order. @@ -1497,7 +1490,7 @@ pub fn oom(_: core::alloc::Layout) -> ! { /// Type alias for Externalities implementation used in tests. #[cfg(feature = "std")] -pub type TestExternalities = sp_state_machine::TestExternalities; +pub type TestExternalities = sp_state_machine::TestExternalities; /// The host functions Substrate provides for the Wasm runtime environment. /// diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 82c50fffeb8d7..1749cc4853672 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -31,7 +31,7 @@ use std::{ sync::{Arc, Mutex}, }; -type TestExternalities = sp_state_machine::TestExternalities; +type TestExternalities = sp_state_machine::TestExternalities; fn call_wasm_method_with_result( binary: &[u8], diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index 87af9bc77a5fa..978653efb93d8 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -26,59 +26,49 @@ use crate::{ codec::{Decode, Encode, Error, Input}, scale_info::{ build::{Fields, Variants}, - meta_type, Path, Type, TypeInfo, TypeParameter, + Path, Type, TypeInfo, }, ConsensusEngineId, }; -use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; +use sp_core::RuntimeDebug; /// Generic header digest. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] -pub struct Digest { +pub struct Digest { /// A list of logs in the digest. - #[cfg_attr( - feature = "std", - serde(bound(serialize = "Hash: codec::Codec", deserialize = "Hash: codec::Codec")) - )] - pub logs: Vec>, + pub logs: Vec, } -impl Default for Digest { +impl Default for Digest { fn default() -> Self { Self { logs: Vec::new() } } } -impl Digest { +impl Digest { /// Get reference to all digest items. - pub fn logs(&self) -> &[DigestItem] { + pub fn logs(&self) -> &[DigestItem] { &self.logs } /// Push new digest item. - pub fn push(&mut self, item: DigestItem) { + pub fn push(&mut self, item: DigestItem) { self.logs.push(item); } /// Pop a digest item. - pub fn pop(&mut self) -> Option> { + pub fn pop(&mut self) -> Option { self.logs.pop() } /// Get reference to the first digest item that matches the passed predicate. 
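[Editor's note] With the rewrite above, `Digest` becomes a concrete type: a plain list of `DigestItem`s with no `Hash` parameter. A minimal usage sketch (assumes `sp-runtime` from this tree):

use sp_runtime::generic::{Digest, DigestItem};

fn main() {
    let mut digest = Digest::default();
    digest.push(DigestItem::Other(b"payload".to_vec()));
    assert_eq!(digest.logs().len(), 1);
    assert_eq!(digest.pop(), Some(DigestItem::Other(b"payload".to_vec())));
    assert!(digest.logs().is_empty());
}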
- pub fn log) -> Option<&T>>( - &self, - predicate: F, - ) -> Option<&T> { + pub fn log Option<&T>>(&self, predicate: F) -> Option<&T> { self.logs().iter().find_map(predicate) } /// Get a conversion of the first digest item that successfully converts using the function. - pub fn convert_first) -> Option>( - &self, - predicate: F, - ) -> Option { + pub fn convert_first Option>(&self, predicate: F) -> Option { self.logs().iter().find_map(predicate) } } @@ -87,12 +77,7 @@ impl Digest { /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] -pub enum DigestItem { - /// System digest item that contains the root of changes trie at given - /// block. It is created for every block iff runtime supports changes - /// trie creation. - ChangesTrieRoot(Hash), - +pub enum DigestItem { /// A pre-runtime digest. /// /// These are messages from the consensus engine to the runtime, although @@ -116,10 +101,6 @@ pub enum DigestItem { /// by runtimes. Seal(ConsensusEngineId, Vec), - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(ChangesTrieSignal), - /// Some other thing. Unsupported and experimental. Other(Vec), @@ -132,25 +113,8 @@ pub enum DigestItem { RuntimeEnvironmentUpdated, } -/// Available changes trie signals. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug, parity_util_mem::MallocSizeOf))] -pub enum ChangesTrieSignal { - /// New changes trie configuration is enacted, starting from **next block**. - /// - /// The block that emits this signal will contain changes trie (CT) that covers - /// blocks range [BEGIN; current block], where BEGIN is (order matters): - /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created using current - /// configuration AND the last top level digest CT has been created at block - /// LAST_TOP_LEVEL_DIGEST_BLOCK; - /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change before and - /// the last configuration change happened at block LAST_CONFIGURATION_CHANGE_BLOCK; - /// - 1 otherwise. 
- NewConfiguration(Option), -} - #[cfg(feature = "std")] -impl serde::Serialize for DigestItem { +impl serde::Serialize for DigestItem { fn serialize(&self, seq: S) -> Result where S: serde::Serializer, @@ -160,7 +124,7 @@ impl serde::Serialize for DigestItem { } #[cfg(feature = "std")] -impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { +impl<'a> serde::Deserialize<'a> for DigestItem { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, @@ -171,75 +135,48 @@ impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { } } -impl TypeInfo for DigestItem -where - Hash: TypeInfo + 'static, -{ +impl TypeInfo for DigestItem { type Identity = Self; fn type_info() -> Type { - Type::builder() - .path(Path::new("DigestItem", module_path!())) - .type_params(vec![TypeParameter::new("Hash", Some(meta_type::()))]) - .variant( - Variants::new() - .variant("ChangesTrieRoot", |v| { - v.index(DigestItemType::ChangesTrieRoot as u8) - .fields(Fields::unnamed().field(|f| f.ty::().type_name("Hash"))) - }) - .variant("PreRuntime", |v| { - v.index(DigestItemType::PreRuntime as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("Consensus", |v| { - v.index(DigestItemType::Consensus as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("Seal", |v| { - v.index(DigestItemType::Seal as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("ChangesTrieSignal", |v| { - v.index(DigestItemType::ChangesTrieSignal as u8).fields( - Fields::unnamed().field(|f| { - f.ty::().type_name("ChangesTrieSignal") - }), - ) - }) - .variant("Other", |v| { - v.index(DigestItemType::Other as u8).fields( - Fields::unnamed().field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("RuntimeEnvironmentUpdated", |v| { - v.index(DigestItemType::RuntimeEnvironmentUpdated as u8) - .fields(Fields::unit()) - }), - ) + Type::builder().path(Path::new("DigestItem", module_path!())).variant( + Variants::new() + .variant("PreRuntime", |v| { + v.index(DigestItemType::PreRuntime as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Consensus", |v| { + v.index(DigestItemType::Consensus as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Seal", |v| { + v.index(DigestItemType::Seal as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Other", |v| { + v.index(DigestItemType::Other as u8) + .fields(Fields::unnamed().field(|f| f.ty::>().type_name("Vec"))) + }) + .variant("RuntimeEnvironmentUpdated", |v| { + v.index(DigestItemType::RuntimeEnvironmentUpdated as u8).fields(Fields::unit()) + }), + ) } } /// A 'referencing view' for digest item. Does not own its contents. Used by /// final runtime implementations for encoding/decoding its log items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] -pub enum DigestItemRef<'a, Hash: 'a> { - /// Reference to `DigestItem::ChangesTrieRoot`. - ChangesTrieRoot(&'a Hash), +pub enum DigestItemRef<'a> { /// A pre-runtime digest. 
/// /// These are messages from the consensus engine to the runtime, although @@ -254,9 +191,6 @@ pub enum DigestItemRef<'a, Hash: 'a> { /// Put a Seal on it. This is only used by native code, and is never seen /// by runtimes. Seal(&'a ConsensusEngineId, &'a Vec), - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(&'a ChangesTrieSignal), /// Any 'non-system' digest item, opaque to the native code. Other(&'a Vec), /// Runtime code or heap pages updated. @@ -271,11 +205,9 @@ pub enum DigestItemRef<'a, Hash: 'a> { #[derive(Encode, Decode)] pub enum DigestItemType { Other = 0, - ChangesTrieRoot = 2, Consensus = 4, Seal = 5, PreRuntime = 6, - ChangesTrieSignal = 7, RuntimeEnvironmentUpdated = 8, } @@ -293,25 +225,18 @@ pub enum OpaqueDigestItemId<'a> { Other, } -impl DigestItem { +impl DigestItem { /// Returns a 'referencing view' for this digest item. - pub fn dref(&self) -> DigestItemRef { + pub fn dref(&self) -> DigestItemRef { match *self { - Self::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), Self::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), Self::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), Self::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - Self::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), Self::Other(ref v) => DigestItemRef::Other(v), Self::RuntimeEnvironmentUpdated => DigestItemRef::RuntimeEnvironmentUpdated, } } - /// Returns `Some` if the entry is the `ChangesTrieRoot` entry. - pub fn as_changes_trie_root(&self) -> Option<&Hash> { - self.dref().as_changes_trie_root() - } - /// Returns `Some` if this entry is the `PreRuntime` entry. pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &[u8])> { self.dref().as_pre_runtime() @@ -327,11 +252,6 @@ impl DigestItem { self.dref().as_seal() } - /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. - pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { - self.dref().as_changes_trie_signal() - } - /// Returns Some if `self` is a `DigestItem::Other`. pub fn as_other(&self) -> Option<&[u8]> { self.dref().as_other() @@ -372,20 +292,19 @@ impl DigestItem { } } -impl Encode for DigestItem { +impl Encode for DigestItem { fn encode(&self) -> Vec { self.dref().encode() } } -impl codec::EncodeLike for DigestItem {} +impl codec::EncodeLike for DigestItem {} -impl Decode for DigestItem { +impl Decode for DigestItem { #[allow(deprecated)] fn decode(input: &mut I) -> Result { let item_type: DigestItemType = Decode::decode(input)?; match item_type { - DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot(Decode::decode(input)?)), DigestItemType::PreRuntime => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::PreRuntime(vals.0, vals.1)) @@ -398,23 +317,13 @@ impl Decode for DigestItem { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::Seal(vals.0, vals.1)) }, - DigestItemType::ChangesTrieSignal => - Ok(Self::ChangesTrieSignal(Decode::decode(input)?)), DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), } } } -impl<'a, Hash> DigestItemRef<'a, Hash> { - /// Cast this digest item into `ChangesTrieRoot`. 
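[Editor's note] The encoding stays wire-compatible for the surviving variants: one `DigestItemType` discriminant byte, then the variant payload. Note that `Seal` keeps index 5 even though the changes-trie discriminants (2 and 7) are gone. A round-trip sketch (assumes `sp-runtime` and `parity-scale-codec`):

use codec::{Decode, Encode};
use sp_runtime::generic::DigestItem;

fn main() {
    let item = DigestItem::Seal(*b"test", vec![1, 2, 3]);
    let encoded = item.encode();
    assert_eq!(encoded[0], 5); // DigestItemType::Seal = 5
    assert_eq!(DigestItem::decode(&mut &encoded[..]).unwrap(), item);
}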
- pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { - match *self { - Self::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), - _ => None, - } - } - +impl<'a> DigestItemRef<'a> { /// Cast this digest item into `PreRuntime` pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { @@ -439,14 +348,6 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } - /// Cast this digest item into `ChangesTrieSignal`. - pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { - match *self { - Self::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), - _ => None, - } - } - /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { @@ -508,15 +409,11 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } -impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { +impl<'a> Encode for DigestItemRef<'a> { fn encode(&self) -> Vec { let mut v = Vec::new(); match *self { - Self::ChangesTrieRoot(changes_trie_root) => { - DigestItemType::ChangesTrieRoot.encode_to(&mut v); - changes_trie_root.encode_to(&mut v); - }, Self::Consensus(val, data) => { DigestItemType::Consensus.encode_to(&mut v); (val, data).encode_to(&mut v); @@ -529,10 +426,6 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, - Self::ChangesTrieSignal(changes_trie_signal) => { - DigestItemType::ChangesTrieSignal.encode_to(&mut v); - changes_trie_signal.encode_to(&mut v); - }, Self::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); @@ -546,16 +439,7 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { } } -impl ChangesTrieSignal { - /// Try to cast this signal to NewConfiguration. 
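[Editor's note] As the `Encode` impls in this hunk show, an owned `DigestItem` encodes by delegating to its borrowing `DigestItemRef` view, so both produce identical bytes. A quick check (the engine id `*b"FRNK"` is an arbitrary example value):

use codec::Encode;
use sp_runtime::generic::DigestItem;

fn main() {
    let item = DigestItem::Consensus(*b"FRNK", vec![7]);
    // `dref` borrows; encoding through the reference view matches the
    // owned item's encoding byte for byte.
    assert_eq!(item.encode(), item.dref().encode());
}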
- pub fn as_new_configuration(&self) -> Option<&Option> { - match self { - Self::NewConfiguration(config) => Some(config), - } - } -} - -impl<'a, Hash: Encode> codec::EncodeLike for DigestItemRef<'a, Hash> {} +impl<'a> codec::EncodeLike for DigestItemRef<'a> {} #[cfg(test)] mod tests { @@ -564,22 +448,18 @@ mod tests { #[test] fn should_serialize_digest() { let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(4), - DigestItem::Other(vec![1, 2, 3]), - DigestItem::Seal(*b"test", vec![1, 2, 3]), - ], + logs: vec![DigestItem::Other(vec![1, 2, 3]), DigestItem::Seal(*b"test", vec![1, 2, 3])], }; assert_eq!( serde_json::to_string(&digest).unwrap(), - r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# + r#"{"logs":["0x000c010203","0x05746573740c010203"]}"# ); } #[test] fn digest_item_type_info() { - let type_info = DigestItem::::type_info(); + let type_info = DigestItem::type_info(); let variants = if let scale_info::TypeDef::Variant(variant) = type_info.type_def() { variant.variants() } else { @@ -589,21 +469,13 @@ mod tests { // ensure that all variants are covered by manual TypeInfo impl let check = |digest_item_type: DigestItemType| { let (variant_name, digest_item) = match digest_item_type { - DigestItemType::Other => ("Other", DigestItem::::Other(Default::default())), - DigestItemType::ChangesTrieRoot => - ("ChangesTrieRoot", DigestItem::ChangesTrieRoot(Default::default())), + DigestItemType::Other => ("Other", DigestItem::Other(Default::default())), DigestItemType::Consensus => ("Consensus", DigestItem::Consensus(Default::default(), Default::default())), DigestItemType::Seal => ("Seal", DigestItem::Seal(Default::default(), Default::default())), DigestItemType::PreRuntime => ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())), - DigestItemType::ChangesTrieSignal => ( - "ChangesTrieSignal", - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - Default::default(), - )), - ), DigestItemType::RuntimeEnvironmentUpdated => ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated), }; @@ -617,11 +489,9 @@ mod tests { }; check(DigestItemType::Other); - check(DigestItemType::ChangesTrieRoot); check(DigestItemType::Consensus); check(DigestItemType::Seal); check(DigestItemType::PreRuntime); - check(DigestItemType::ChangesTrieSignal); check(DigestItemType::RuntimeEnvironmentUpdated); } } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 82f081c0d70b0..21c2a6eef73af 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -51,7 +51,7 @@ pub struct Header + TryFrom, Hash: HashT> { /// The merkle root of the extrinsics. pub extrinsics_root: Hash::Output, /// A chain-specific digest of data useful for light clients or referencing auxiliary data. 
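[Editor's note] As exercised by the updated `should_serialize_digest` test above, digest items serialize to JSON as hex strings of their SCALE encoding. A standalone sketch (assumes `serde_json` as a dev-dependency and sp-runtime's `std` feature):

use sp_runtime::{Digest, DigestItem};

fn main() {
    let digest = Digest { logs: vec![DigestItem::Other(vec![1, 2, 3])] };
    // 0x00 = DigestItemType::Other, 0x0c = compact length 3, then the bytes.
    assert_eq!(
        serde_json::to_string(&digest).unwrap(),
        r#"{"logs":["0x000c010203"]}"#
    );
}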
- pub digest: Digest, + pub digest: Digest, } #[cfg(feature = "std")] @@ -150,11 +150,11 @@ where self.parent_hash = hash } - fn digest(&self) -> &Digest { + fn digest(&self) -> &Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Digest { + fn digest_mut(&mut self) -> &mut Digest { #[cfg(feature = "std")] log::debug!(target: "header", "Retrieving mutable reference to digest"); &mut self.digest @@ -165,7 +165,7 @@ where extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Digest, + digest: Digest, ) -> Self { Self { number, extrinsics_root, state_root, parent_hash, digest } } @@ -235,10 +235,7 @@ mod tests { state_root: BlakeTwo256::hash(b"3"), extrinsics_root: BlakeTwo256::hash(b"4"), digest: crate::generic::Digest { - logs: vec![ - crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"5")), - crate::generic::DigestItem::Other(b"6".to_vec()), - ], + logs: vec![crate::generic::DigestItem::Other(b"6".to_vec())], }, }; @@ -251,9 +248,7 @@ mod tests { 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, 12, 73, - 104, 49, 200, 204, 31, 143, 13, 8, 2, 112, 178, 1, 53, 47, 36, 191, 28, 151, 112, - 185, 159, 143, 113, 32, 24, 33, 65, 28, 244, 20, 55, 124, 155, 140, 45, 188, 238, - 97, 219, 135, 214, 0, 4, 54 + 104, 49, 200, 204, 31, 143, 13, 4, 0, 4, 54 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); @@ -264,10 +259,7 @@ mod tests { state_root: BlakeTwo256::hash(b"3000"), extrinsics_root: BlakeTwo256::hash(b"4000"), digest: crate::generic::Digest { - logs: vec![ - crate::generic::DigestItem::Other(b"5000".to_vec()), - crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"6000")), - ], + logs: vec![crate::generic::DigestItem::Other(b"5000".to_vec())], }, }; @@ -280,9 +272,7 @@ mod tests { 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, - 111, 126, 54, 34, 155, 220, 253, 124, 8, 0, 16, 53, 48, 48, 48, 2, 42, 105, 109, - 150, 206, 223, 24, 44, 164, 77, 27, 137, 177, 220, 25, 170, 140, 35, 156, 246, 233, - 112, 26, 23, 192, 61, 226, 14, 84, 219, 144, 252 + 111, 126, 54, 34, 155, 220, 253, 124, 4, 0, 16, 53, 48, 48, 48 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index 71127e88ec32c..3d8e8a0ce7faa 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -31,7 +31,7 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, checked_extrinsic::CheckedExtrinsic, - digest::{ChangesTrieSignal, Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, + digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index 095bcb717bb11..a65e212bf07ec 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -19,29 +19,26 @@ use super::DigestItem; use crate::codec::{Decode, Encode}; -use sp_core::H256; #[test] fn 
system_digest_item_encoding() { - let item = DigestItem::ChangesTrieRoot::(H256::default()); + let item = DigestItem::Consensus([1, 2, 3, 4], vec![5, 6, 7, 8]); let encoded = item.encode(); assert_eq!( encoded, vec![ - // type = DigestItemType::ChangesTrieRoot - 2, // trie root - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, + 4, // type = DigestItemType::Consensus + 1, 2, 3, 4, 16, 5, 6, 7, 8, ] ); - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); } #[test] fn non_system_digest_item_encoding() { - let item = DigestItem::Other::(vec![10, 20, 30]); + let item = DigestItem::Other(vec![10, 20, 30]); let encoded = item.encode(); assert_eq!( encoded, @@ -53,6 +50,6 @@ fn non_system_digest_item_encoding() { ] ); - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index fe9ba588adb87..4573bc84473a3 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -182,10 +182,10 @@ impl traits::Verify for TestSignature { } /// Digest item -pub type DigestItem = generic::DigestItem; +pub type DigestItem = generic::DigestItem; /// Header Digest -pub type Digest = generic::Digest; +pub type Digest = generic::Digest; /// Block Header pub type Header = generic::Header; diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 6d79d740dc4e1..f61de70e35197 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -19,7 +19,7 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, - generic::{Digest, DigestItem}, + generic::Digest, scale_info::{MetaType, StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -548,10 +548,7 @@ impl CheckEqual for sp_core::H256 { } } -impl CheckEqual for super::generic::DigestItem -where - H: Encode, -{ +impl CheckEqual for super::generic::DigestItem { #[cfg(feature = "std")] fn check_equal(&self, other: &Self) { if self != other { @@ -642,7 +639,7 @@ pub trait Header: extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Digest, + digest: Digest, ) -> Self; /// Returns a reference to the header number. @@ -666,9 +663,9 @@ pub trait Header: fn set_parent_hash(&mut self, hash: Self::Hash); /// Returns a reference to the digest. - fn digest(&self) -> &Digest; + fn digest(&self) -> &Digest; /// Get a mutable reference to the digest. - fn digest_mut(&mut self) -> &mut Digest; + fn digest_mut(&mut self) -> &mut Digest; /// Returns the hash of the header. fn hash(&self) -> Self::Hash { @@ -763,9 +760,6 @@ pub type HashFor = <::Header as Header>::Hashing; /// Extract the number type for a block. pub type NumberFor = <::Header as Header>::Number; /// Extract the digest type for a block. -pub type DigestFor = Digest<<::Header as Header>::Hash>; -/// Extract the digest item type for a block. -pub type DigestItemFor = DigestItem<<::Header as Header>::Hash>; /// A "checkable" piece of information, used by the standard Substrate Executive in order to /// check the validity of a piece of extrinsic information, usually by verifying the signature. 
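[Editor's note] With the `DigestFor`/`DigestItemFor` aliases removed above, generic code reads and writes digests through the `Header` trait using the concrete `Digest` type. A sketch using sp-runtime's test header (`testing::Header`, i.e. u64 numbers with BlakeTwo256 hashing):

use sp_runtime::{testing::Header, traits::Header as HeaderT, Digest, DigestItem};

fn main() {
    let mut header = <Header as HeaderT>::new(
        1,                  // number
        Default::default(), // extrinsics root
        Default::default(), // state root
        Default::default(), // parent hash
        Digest::default(),
    );
    header.digest_mut().push(DigestItem::Other(vec![1]));
    assert_eq!(header.digest().logs().len(), 1);
}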
diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 7dcf92b06de06..eb6e2939b83fc 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -292,32 +292,6 @@ impl> Consolidate for sp_trie::GenericMem } } -/// Insert input pairs into memory db. -#[cfg(test)] -pub(crate) fn insert_into_memory_db( - mdb: &mut sp_trie::MemoryDB, - input: I, -) -> Option -where - H: Hasher, - I: IntoIterator, -{ - use sp_trie::{trie_types::TrieDBMut, TrieMut}; - - let mut root = ::Out::default(); - { - let mut trie = TrieDBMut::::new(mdb, &mut root); - for (key, value) in input { - if let Err(e) = trie.insert(&key, &value) { - log::warn!(target: "trie", "Failed to write to trie: {}", e); - return None - } - } - } - - Some(root) -} - /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. #[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 0bbd2d0a8e8e6..3774adc5b0368 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -309,10 +309,6 @@ impl Externalities for BasicExternalities { .encode() } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - Ok(None) - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by BasicExternalities"); } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs deleted file mode 100644 index d3c6c12122c4f..0000000000000 --- a/primitives/state-machine/src/changes_trie/build.rs +++ /dev/null @@ -1,1083 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Structures and functions required to build changes trie for given block. - -use crate::{ - backend::Backend, - changes_trie::{ - build_iterator::digest_build_iterator, - input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, - AnchorBlockId, BlockNumber, ConfigurationRange, Storage, - }, - overlayed_changes::{OverlayedChanges, OverlayedValue}, - trie_backend_essence::TrieBackendEssence, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::Hasher; -use num_traits::One; -use sp_core::storage::{ChildInfo, PrefixedStorageKey}; -use std::collections::{btree_map::Entry, BTreeMap}; - -/// Prepare input pairs for building a changes trie of given block. -/// -/// Returns Err if storage error has occurred OR if storage haven't returned -/// required data. 
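[Editor's note] For reference, the input preparation being deleted here aggregated, for each changed storage key, the sorted, de-duplicated list of extrinsic indices that touched it within the block (the `ExtrinsicIndex` pairs). A dependency-free toy version of that aggregation (types simplified; the real code also filtered out temporary values against the backend):

use std::collections::BTreeMap;

fn main() {
    // (storage key, index of the extrinsic that changed it)
    let changes = [(vec![100u8], 3u32), (vec![101], 0), (vec![100], 1)];
    let mut index: BTreeMap<Vec<u8>, Vec<u32>> = BTreeMap::new();
    for (key, extrinsic) in changes {
        let entry = index.entry(key).or_default();
        if !entry.contains(&extrinsic) {
            entry.push(extrinsic);
            entry.sort();
        }
    }
    assert_eq!(index[&vec![100u8]], vec![1, 3]);
    assert_eq!(index[&vec![101u8]], vec![0]);
}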
-pub(crate) fn prepare_input<'a, B, H, Number>( - backend: &'a B, - storage: &'a dyn Storage, - config: ConfigurationRange<'a, Number>, - overlay: &'a OverlayedChanges, - parent: &'a AnchorBlockId, -) -> Result< - ( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, - Vec, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, -{ - let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = - prepare_extrinsics_input(backend, &number, overlay)?; - let (digest_input, mut children_digest_input, digest_input_blocks) = - prepare_digest_input::(parent, config, number, storage)?; - - let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); - for (child_index, ext_iter) in children_extrinsics_input.into_iter() { - let dig_iter = children_digest_input.remove(&child_index); - children_digest.push(( - child_index, - Some(ext_iter).into_iter().flatten().chain(dig_iter.into_iter().flatten()), - )); - } - for (child_index, dig_iter) in children_digest_input.into_iter() { - children_digest.push(( - child_index, - None.into_iter().flatten().chain(Some(dig_iter).into_iter().flatten()), - )); - } - - Ok((extrinsics_input.chain(digest_input), children_digest, digest_input_blocks)) -} -/// Prepare ExtrinsicIndex input pairs. -fn prepare_extrinsics_input<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, -{ - let mut children_result = BTreeMap::new(); - - for (child_changes, child_info) in overlay.children() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key: child_info.prefixed_storage_key(), - }; - - let iter = prepare_extrinsics_input_inner( - backend, - block, - overlay, - Some(child_info.clone()), - child_changes, - )?; - children_result.insert(child_index, iter); - } - - let top = prepare_extrinsics_input_inner(backend, block, overlay, None, overlay.changes())?; - - Ok((top, children_result)) -} - -fn prepare_extrinsics_input_inner<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, - child_info: Option, - changes: impl Iterator, -) -> Result> + 'a, String> -where - B: Backend, - H: Hasher, - Number: BlockNumber, -{ - changes - .filter_map(|(k, v)| { - let extrinsics = v.extrinsics(); - if !extrinsics.is_empty() { - Some((k, extrinsics)) - } else { - None - } - }) - .try_fold( - BTreeMap::new(), - |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of - // operation AND are not in storage at the beginning of operation - if let Some(child_info) = child_info.as_ref() { - if !overlay - .child_storage(child_info, k) - .map(|v| v.is_some()) - .unwrap_or_default() - { - if !backend - .exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? - { - return Ok(map) - } - } - } else { - if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ - return Ok(map) - } - } - }; - - let extrinsics = extrinsics.into_iter().collect(); - entry.insert(( - ExtrinsicIndex { block: block.clone(), key: k.to_vec() }, - extrinsics, - )); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is - // Occupied AND we are checking it before insertion - let entry_extrinsics = &mut entry.get_mut().1; - entry_extrinsics.extend(extrinsics.into_iter()); - entry_extrinsics.sort(); - }, - } - - Ok(map) - }, - ) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) -} - -/// Prepare DigestIndex input pairs. -fn prepare_digest_input<'a, H, Number>( - parent: &'a AnchorBlockId, - config: ConfigurationRange, - block: Number, - storage: &'a dyn Storage, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - Vec, - ), - String, -> -where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, -{ - let build_skewed_digest = config.end.as_ref() == Some(&block); - let block_for_digest = if build_skewed_digest { - config - .config - .next_max_level_digest_range(config.zero.clone(), block.clone()) - .map(|(_, end)| end) - .unwrap_or_else(|| block.clone()) - } else { - block.clone() - }; - - let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks - .clone() - .into_iter() - .try_fold( - (BTreeMap::new(), BTreeMap::new()), - move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = - ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| { - format!("No changes trie root for block {}", digest_build_block.clone()) - })?; - - let insert_to_map = |map: &mut BTreeMap<_, _>, key: StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert(( - DigestIndex { block: block.clone(), key }, - vec![digest_build_block.clone()], - )); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. 
Here we are relying on the fact that - // digest_build_iterator() returns blocks in ascending order => we only - // need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest - // block AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = - storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } - } - }); - if populated_from_cache { - return Ok((map, child_map)) - } - - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { - if let Ok(InputKey::ChildIndex::(trie_key)) = - Decode::decode(&mut key) - { - if let Ok(value) = >::decode(&mut value) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); - } - } - }); - - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { block: block.clone(), storage_key }; - - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - Ok((map, child_map)) - }, - ) - .map(|(pairs, child_pairs)| { - ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs - .into_iter() - .map(|(sk, pairs)| { - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) - }) - .collect(), - digest_input_blocks, - ) - }) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - changes_trie::{ - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - storage::InMemoryStorage, - Configuration, RootsStorage, - }, - InMemoryBackend, - }; - use sp_core::Blake2Hasher; - - fn prepare_for_build( - zero: u64, - ) -> ( - InMemoryBackend, - InMemoryStorage, - OverlayedChanges, - Configuration, - ) { - let child_info_1 = ChildInfo::new_default(b"storage_key1"); - let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ - 
(vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ] - .into_iter() - .collect::>() - .into(); - let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); - let storage = InMemoryStorage::with_inputs( - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 3, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![100] }, - vec![0], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![105] }, - vec![1], - ), - ], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3], - ), - ], - ), - (zero + 5, Vec::new()), - ( - zero + 6, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 6, key: vec![105] }, - vec![2], - )], - ), - (zero + 7, Vec::new()), - ( - zero + 8, - vec![InputPair::DigestIndex( - DigestIndex { block: zero + 8, key: vec![105] }, - vec![zero + 6], - )], - ), - (zero + 9, Vec::new()), - (zero + 10, Vec::new()), - (zero + 11, Vec::new()), - (zero + 12, Vec::new()), - (zero + 13, Vec::new()), - (zero + 14, Vec::new()), - (zero + 15, Vec::new()), - ], - vec![( - prefixed_child_trie_key1.clone(), - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0, 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - ], - ), - ], - )], - ); - - let mut changes = OverlayedChanges::default(); - changes.set_collect_extrinsics(true); - - changes.start_transaction(); - - changes.set_extrinsic_index(1); - changes.set_storage(vec![101], Some(vec![203])); - - changes.set_extrinsic_index(3); - changes.set_storage(vec![100], Some(vec![202])); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![202])); - - changes.commit_transaction().unwrap(); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![100], Some(vec![0])); 
- changes.set_extrinsic_index(2); - changes.set_storage(vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![103], Some(vec![0])); - changes.set_extrinsic_index(1); - changes.set_storage(vec![103], None); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_2, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_child_storage(&child_info_2, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(1); - - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - (backend, storage, changes, config) - } - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn build_changes_trie_nodes_on_non_digest_block() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![103] }, - vec![0, 1] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, - vec![0, 2, 3] - ),] - ), - ( - ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l1() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( 
- DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l2() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - ] - ), - ( - ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_skewed_digest_block() { - fn test_with_zero(zero: u64) { - let (backend, storage, 
changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; - - let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range.clone(), &changes, &parent) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - ] - ); - - configuration_range.end = Some(zero + 11); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_ignores_temporary_storage_values() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, mut changes, config) = prepare_for_build(zero); - - // 110: missing from backend, set to None in overlay - changes.set_storage(vec![110], None); - - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, 
key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn cache_is_used_when_changes_trie_is_built() { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, mut storage, changes, config) = prepare_for_build(0); - let parent = AnchorBlockId { hash: Default::default(), number: 15 }; - - // override some actual values from storage with values from the cache - // - // top-level storage: - // (keys 100, 101, 103, 105 are now missing from block#4 => they do not appear - // in l2 digest at block 16) - // - // "1" child storage: - // key 102 is now missing from block#4 => it doesn't appear in l2 digest at block 16 - // (keys 103, 104) are now added to block#4 => they appear in l2 digest at block 16 - // - // "2" child storage: - // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 - let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); - let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) - .set_digest_input_blocks(vec![1, 2, 3]) - .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, &trie_root4); - storage.cache_mut().perform(cached_data4); - - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = - prepare_input(&backend, &storage, configuration_range(&config, 0), &changes, &parent) - .unwrap(); - assert_eq!( - root_changes_trie_nodes.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ] - ); - - let child_changes_tries_nodes = child_changes_tries_nodes - .into_iter() - .map(|(k, i)| (k, i.collect::>())) - .collect::>(); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key1.clone() }) - .unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), - ], - ); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }) - 
.unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), - ], - ); - } -} diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs deleted file mode 100644 index 04820242d9d08..0000000000000 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ /dev/null @@ -1,278 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes tries build cache. - -use std::collections::{HashMap, HashSet}; - -use crate::StorageKey; -use sp_core::storage::PrefixedStorageKey; - -/// Changes trie build cache. -/// -/// Helps to avoid read of changes tries from the database when digest trie -/// is built. It holds changed keys for every block (indexed by changes trie -/// root) that could be referenced by future digest items. For digest entries -/// it also holds keys covered by this digest. Entries for top level digests -/// are never created, because they'll never be used to build other digests. -/// -/// Entries are pruned from the cache once digest block that is using this entry -/// is inserted (because digest block will includes all keys from this entry). -/// When there's a fork, entries are pruned when first changes trie is inserted. -pub struct BuildCache { - /// Map of block (implies changes trie) number => changes trie root. - roots_by_number: HashMap, - /// Map of changes trie root => set of storage keys that are in this trie. - /// The `Option>` in inner `HashMap` stands for the child storage key. - /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. - /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by - /// the key. - changed_keys: HashMap, HashSet>>, -} - -/// The action to perform when block-with-changes-trie is imported. -#[derive(Debug, PartialEq)] -pub enum CacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(CachedBuildData), - /// Clear cache from all existing entries. - Clear, -} - -/// The data that has been cached during changes trie building. -#[derive(Debug, PartialEq)] -pub struct CachedBuildData { - block: N, - trie_root: H, - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, -} - -/// The action to perform when block-with-changes-trie is imported. -#[derive(Debug, PartialEq)] -pub(crate) enum IncompleteCacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(IncompleteCachedBuildData), - /// Clear cache from all existing entries. - Clear, -} - -/// The data (without changes trie root) that has been cached during changes trie building. 
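
For concreteness, a minimal sketch of the cache lifecycle described above, mirroring this module's own tests (`u64` stands in for both the block number `N` and the trie root `H`; all types are the crate-internal ones defined in this file):

    let mut cache = BuildCache::<u64, u64>::new();

    // Block #1 changed top-level key [1]; remember that fact under trie root 1.
    cache.perform(CacheAction::CacheBuildData(
        IncompleteCachedBuildData::new()
            .insert(None, vec![vec![1]].into_iter().collect())
            .complete(1, 1),
    ));

    // A later digest build can consume the cached keys instead of re-reading the trie.
    let hit = cache.with_changed_keys(&1, &mut |changed_keys| {
        assert!(changed_keys.contains_key(&None));
    });
    assert!(hit);
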
-#[derive(Debug, PartialEq)] -pub(crate) struct IncompleteCachedBuildData { - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, -} - -impl BuildCache -where - N: Eq + ::std::hash::Hash, - H: Eq + ::std::hash::Hash + Clone, -{ - /// Create new changes trie build cache. - pub fn new() -> Self { - BuildCache { roots_by_number: HashMap::new(), changed_keys: HashMap::new() } - } - - /// Get cached changed keys for changes trie with given root. - pub fn get( - &self, - root: &H, - ) -> Option<&HashMap, HashSet>> { - self.changed_keys.get(&root) - } - - /// Execute given functor with cached entry for given block. - /// Returns true if the functor has been called and false otherwise. - pub fn with_changed_keys( - &self, - root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - match self.changed_keys.get(&root) { - Some(changed_keys) => { - functor(changed_keys); - true - }, - None => false, - } - } - - /// Insert data into cache. - pub fn perform(&mut self, action: CacheAction) { - match action { - CacheAction::CacheBuildData(data) => { - self.roots_by_number.insert(data.block, data.trie_root.clone()); - self.changed_keys.insert(data.trie_root, data.changed_keys); - - for digest_input_block in data.digest_input_blocks { - let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); - if let Some(digest_input_block_hash) = digest_input_block_hash { - self.changed_keys.remove(&digest_input_block_hash); - } - } - }, - CacheAction::Clear => { - self.roots_by_number.clear(); - self.changed_keys.clear(); - }, - } - } -} - -impl IncompleteCacheAction { - /// Returns true if we need to collect changed keys for this action. - pub fn collects_changed_keys(&self) -> bool { - match *self { - IncompleteCacheAction::CacheBuildData(_) => true, - IncompleteCacheAction::Clear => false, - } - } - - /// Complete cache action with computed changes trie root. - pub(crate) fn complete(self, block: N, trie_root: &H) -> CacheAction { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - CacheAction::CacheBuildData(build_data.complete(block, trie_root.clone())), - IncompleteCacheAction::Clear => CacheAction::Clear, - } - } - - /// Set numbers of blocks that are superseded by this new entry. - /// - /// If/when this build data is committed to the cache, entries for these blocks - /// will be removed from the cache. - pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData( - build_data.set_digest_input_blocks(digest_input_blocks), - ), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } - - /// Insert changed keys of given storage into cached data. - pub(crate) fn insert( - self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.insert(storage_key, changed_keys)), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } -} - -impl IncompleteCachedBuildData { - /// Create new cached data. 
- pub(crate) fn new() -> Self { - IncompleteCachedBuildData { digest_input_blocks: Vec::new(), changed_keys: HashMap::new() } - } - - fn complete(self, block: N, trie_root: H) -> CachedBuildData { - CachedBuildData { - block, - trie_root, - digest_input_blocks: self.digest_input_blocks, - changed_keys: self.changed_keys, - } - } - - fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { - self.digest_input_blocks = digest_input_blocks; - self - } - - fn insert( - mut self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - self.changed_keys.insert(storage_key, changed_keys); - self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn updated_keys_are_stored_when_non_top_level_digest_is_built() { - let mut data = IncompleteCachedBuildData::::new(); - data = data.insert(None, vec![vec![1]].into_iter().collect()); - assert_eq!(data.changed_keys.len(), 1); - - let mut cache = BuildCache::new(); - cache.perform(CacheAction::CacheBuildData(data.complete(1, 1))); - assert_eq!(cache.changed_keys.len(), 1); - assert_eq!( - cache.get(&1).unwrap().clone(), - vec![(None, vec![vec![1]].into_iter().collect())].into_iter().collect(), - ); - } - - #[test] - fn obsolete_entries_are_purged_when_new_ct_is_built() { - let mut cache = BuildCache::::new(); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![1]].into_iter().collect()) - .complete(1, 1), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![2]].into_iter().collect()) - .complete(2, 2), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![3]].into_iter().collect()) - .complete(3, 3), - )); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .set_digest_input_blocks(vec![1, 2, 3]) - .complete(4, 4), - )); - - assert_eq!(cache.changed_keys.len(), 1); - - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![8]].into_iter().collect()) - .complete(8, 8), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![12]].into_iter().collect()) - .complete(12, 12), - )); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::Clear); - - assert_eq!(cache.changed_keys.len(), 0); - } -} diff --git a/primitives/state-machine/src/changes_trie/build_iterator.rs b/primitives/state-machine/src/changes_trie/build_iterator.rs deleted file mode 100644 index 62bb00a2f8829..0000000000000 --- a/primitives/state-machine/src/changes_trie/build_iterator.rs +++ /dev/null @@ -1,487 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Structures and functions to return blocks whose changes are to be included -//! 
in given block's changes trie. - -use crate::changes_trie::{BlockNumber, ConfigurationRange}; -use num_traits::Zero; - -/// Returns iterator of OTHER blocks that are required for inclusion into -/// changes trie of given block. Blocks are guaranteed to be returned in -/// ascending order. -/// -/// Skewed digest is built IF block >= config.end. -pub fn digest_build_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - block: Number, -) -> DigestBuildIterator { - // prepare digest build parameters - let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) - { - Some((current_level, digest_interval, digest_step)) => - (current_level, digest_interval, digest_step), - None => return DigestBuildIterator::empty(), - }; - - DigestBuildIterator::new( - block.clone(), - config.end.unwrap_or(block), - config.config.digest_interval, - digest_step, - ) -} - -/// Changes trie build iterator that returns numbers of OTHER blocks that are -/// required for inclusion into changes trie of given block. -#[derive(Debug)] -pub struct DigestBuildIterator { - /// Block we're building changes trie for. It could (logically) be a post-end block if we are - /// creating skewed digest. - block: Number, - /// Block that is a last block where current configuration is active. We have never yet created - /// anything after this block => digest that we're creating can't reference any blocks that are - /// >= end. - end: Number, - /// Interval of L1 digest blocks. - digest_interval: u32, - /// Max step that could be used when digest is created. - max_step: u32, - - // Mutable data below: - /// Step of current blocks range. - current_step: u32, - /// Reverse step of current blocks range. - current_step_reverse: u32, - /// Current blocks range. - current_range: Option>, - /// Last block that we have returned. - last_block: Option, -} - -impl DigestBuildIterator { - /// Create new digest build iterator. - pub fn new(block: Number, end: Number, digest_interval: u32, max_step: u32) -> Self { - DigestBuildIterator { - block, - end, - digest_interval, - max_step, - current_step: max_step, - current_step_reverse: 0, - current_range: None, - last_block: None, - } - } - - /// Create empty digest build iterator. 
- pub fn empty() -> Self { - Self::new(Zero::zero(), Zero::zero(), 0, 0) - } -} - -impl Iterator for DigestBuildIterator { - type Item = Number; - - fn next(&mut self) -> Option { - // when we're building skewed digest, we might want to skip some blocks if - // they're not covered by current configuration - loop { - if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { - if next < self.end { - self.last_block = Some(next.clone()); - return Some(next) - } - } - - // we are safe to use non-checking mul/sub versions here because: - // DigestBuildIterator is created only by internal function that is checking - // that all multiplications/subtractions are safe within max_step limit - - let next_step_reverse = if self.current_step_reverse == 0 { - 1 - } else { - self.current_step_reverse * self.digest_interval - }; - if next_step_reverse > self.max_step { - return None - } - - self.current_step_reverse = next_step_reverse; - self.current_range = Some(BlocksRange::new( - match self.last_block.clone() { - Some(last_block) => last_block + self.current_step.into(), - None => - self.block.clone() - - (self.current_step * self.digest_interval - self.current_step).into(), - }, - self.block.clone(), - self.current_step.into(), - )); - - self.current_step = self.current_step / self.digest_interval; - if self.current_step == 0 { - self.current_step = 1; - } - } - } -} - -/// Blocks range iterator with builtin step_by support. -#[derive(Debug)] -struct BlocksRange { - current: Number, - end: Number, - step: Number, -} - -impl BlocksRange { - pub fn new(begin: Number, end: Number, step: Number) -> Self { - BlocksRange { current: begin, end, step } - } -} - -impl Iterator for BlocksRange { - type Item = Number; - - fn next(&mut self) -> Option { - if self.current >= self.end { - return None - } - - let current = Some(self.current.clone()); - self.current += self.step.clone(); - current - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::Configuration; - - fn digest_build_iterator( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> DigestBuildIterator { - super::digest_build_iterator( - ConfigurationRange { - config: &Configuration { digest_interval, digest_levels }, - zero, - end, - }, - block, - ) - } - - fn digest_build_iterator_basic( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - ) -> (u64, u32, u32) { - let iter = digest_build_iterator(digest_interval, digest_levels, zero, block, None); - (iter.block, iter.digest_interval, iter.max_step) - } - - fn digest_build_iterator_blocks( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> Vec { - digest_build_iterator(digest_interval, digest_levels, zero, block, end).collect() - } - - #[test] - fn suggest_digest_inclusion_returns_empty_iterator() { - fn test_with_zero(zero: u64) { - let empty = (0, 0, 0); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); - assert_eq!( - digest_build_iterator_basic(0, 16, zero, zero + 64), - empty, - "digest_interval is 0" - ); - assert_eq!( - digest_build_iterator_basic(1, 16, zero, zero + 64), - empty, - "digest_interval is 1" - ); - assert_eq!( - digest_build_iterator_basic(4, 0, zero, zero + 64), - empty, - "digest_levels is 0" - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 1), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 2), - 
empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 15), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 17), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(::std::u32::MAX / 2 + 1, 16, zero, ::std::u64::MAX,), - empty, - "digest_interval * 2 is greater than u64::MAX" - ); - } - - test_with_zero(0); - test_with_zero(1); - test_with_zero(2); - test_with_zero(4); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level1_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 16), - (zero + 16, 16, 1), - "!(block % interval) && first digest level == block", - ); - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 256), - (zero + 256, 16, 1), - "!(block % interval^2), but there's only 1 digest level", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 32), - (zero + 32, 16, 1), - "second level digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4080), - (zero + 4080, 16, 1), - "second && third level digest are not required for this block", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level2_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 256), - (zero + 256, 16, 16), - "second level digest", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 4096), - (zero + 4096, 16, 16), - "!(block % interval^3), but there's only 2 digest levels", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level3_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4096), - (zero + 4096, 16, 256), - "third level digest: beginning", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 8192), - (zero + 8192, 16, 256), - "third level digest: next", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 1, zero, zero + 16, None), - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 1, zero, zero + 256, None), - [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 32, None), - [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [ - 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, - 4078, 4079 - ] - .iter() - .map(|item| zero + item) - .collect::>() - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 256, None), - [ - // level2 points to previous 16-1 level1 digests: - 16, 32, 48, 
64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, - // level2 is a level1 digest of 16-1 previous blocks: - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), - [ - // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, - 4064, 4080, // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, - 4094, 4095, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), - [ - // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, - 3840, // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, - 4064, 4080, // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, - 4094, 4095, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: - 1296, 1312, 1328, - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only - // 9: - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY - // L1-digests: level3 MUST be a level1 digest of 16-1 previous blocks, BUT - // there are only 3: - 1281, 1282, 1283, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } -} diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs deleted file mode 100644 index 9343a226a3aa8..0000000000000 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ /dev/null @@ -1,748 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Functions + iterator that traverses changes tries and returns all -//! (block, extrinsic) pairs where given key has been changed. - -use crate::{ - changes_trie::{ - input::{ChildIndex, DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue}, - storage::{InMemoryStorage, TrieBackendAdapter}, - surface_iterator::{surface_iterator, SurfaceIterator}, - AnchorBlockId, BlockNumber, ConfigurationRange, RootsStorage, Storage, - }, - proving_backend::ProvingBackendRecorder, - trie_backend_essence::TrieBackendEssence, -}; -use codec::{Codec, Decode, Encode}; -use hash_db::Hasher; -use num_traits::Zero; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::Recorder; -use std::{cell::RefCell, collections::VecDeque}; - -/// Return changes of given key at given blocks range. -/// `max` is the number of best known block. -/// Changes are returned in descending order (i.e. last block comes first). -pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - max: Number, - storage_key: Option<&'a PrefixedStorageKey>, - key: &'a [u8], -) -> Result, String> { - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - Ok(DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - }) -} - -/// Returns proof of changes of given key at given blocks range. -/// `max` is the number of best known block. -pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &dyn Storage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result>, String> -where - H::Out: Codec, -{ - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - let mut iter = ProvingDrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - proof_recorder: Default::default(), - }; - - // iterate to collect proof - while let Some(item) = iter.next() { - item?; - } - - Ok(iter.extract_proof()) -} - -/// Check key changes proof and return changes of the key at given blocks range. -/// `max` is the number of best known block. -/// Changes are returned in descending order (i.e. last block comes first). 
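
As the tests further down exercise, the drilldown yields `(block, extrinsic_index)` pairs newest-first; a minimal call sketch, assuming the `BlakeTwo256` hasher and the in-memory fixture storage used by this module's tests:

    let changes = key_changes::<BlakeTwo256, u64>(
        configuration_range(&config, 0),
        &storage,
        1,                                                       // begin
        &AnchorBlockId { hash: Default::default(), number: 16 }, // end
        16,                                                      // best known block
        None,                                                    // top-level trie, no child storage key
        &[42],                                                   // key being tracked
    )
    .and_then(Result::from_iter);

    // Descending order: block 8 (extrinsics 2 and 1), then block 6, then block 3.
    assert_eq!(changes, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)]));
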
-pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof: Vec>, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result, String> -where - H::Out: Encode, -{ - key_changes_proof_check_with_db( - config, - roots_storage, - &InMemoryStorage::with_proof(proof), - begin, - end, - max, - storage_key, - key, - ) -} - -/// Similar to the `key_changes_proof_check` function, but works with prepared proof storage. -pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof_db: &InMemoryStorage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result, String> -where - H::Out: Encode, -{ - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage, - storage: proof_db, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - } - .collect() -} - -/// Drilldown iterator - receives 'digest points' from surface iterator and explores -/// every point until extrinsic is found. -pub struct DrilldownIteratorEssence<'a, H, Number> -where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, -{ - storage_key: Option<&'a PrefixedStorageKey>, - key: &'a [u8], - roots_storage: &'a dyn RootsStorage, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - config: ConfigurationRange<'a, Number>, - surface: SurfaceIterator<'a, Number>, - - extrinsics: VecDeque<(Number, u32)>, - blocks: VecDeque<(Number, Option)>, - - _hasher: ::std::marker::PhantomData, -} - -impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> -where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, -{ - pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - match self.do_next(trie_reader) { - Ok(Some(res)) => Some(Ok(res)), - Ok(None) => None, - Err(err) => Some(Err(err)), - } - } - - fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - loop { - if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))) - } - - if let Some((block, level)) = self.blocks.pop_front() { - // not having a changes trie root is an error because: - // we never query roots for future blocks - // AND trie roots for old blocks are known (both on full + light node) - let trie_root = - self.roots_storage.root(&self.end, block.clone())?.ok_or_else(|| { - format!("Changes trie root for block {} is not found", block.clone()) - })?; - let trie_root = if let Some(storage_key) = self.storage_key { - let child_key = - ChildIndex { block: block.clone(), storage_key: storage_key.clone() } - .encode(); - if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? 
- .and_then(|v| >::decode(&mut &v[..]).ok()) - .map(|v| { - let mut hash = H::Out::default(); - hash.as_mut().copy_from_slice(&v[..]); - hash - }) { - trie_root - } else { - continue - } - } else { - trie_root - }; - - // only return extrinsics for blocks before self.max - // most of blocks will be filtered out before pushing to `self.blocks` - // here we just throwing away changes at digest blocks we're processing - debug_assert!( - block >= self.begin, - "We shall not touch digests earlier than a range' begin" - ); - if block <= self.end.number { - let extrinsics_key = - ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); - if let Some(extrinsics) = extrinsics? { - if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { - self.extrinsics - .extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); - } - } - } - - let blocks_key = - DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let blocks = trie_reader(self.storage, trie_root, &blocks_key); - if let Some(blocks) = blocks? { - if let Ok(blocks) = >::decode(&mut &blocks[..]) { - // filter level0 blocks here because we tend to use digest blocks, - // AND digest block changes could also include changes for out-of-range - // blocks - let begin = self.begin.clone(); - let end = self.end.number.clone(); - let config = self.config.clone(); - self.blocks.extend( - blocks - .into_iter() - .rev() - .filter(|b| { - level.map(|level| level > 1).unwrap_or(true) || - (*b >= begin && *b <= end) - }) - .map(|b| { - let prev_level = - level.map(|level| Some(level - 1)).unwrap_or_else(|| { - Some( - config - .config - .digest_level_at_block( - config.zero.clone(), - b.clone(), - ) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()), - ) - }); - (b, prev_level) - }), - ); - } - } - - continue - } - - match self.surface.next() { - Some(Ok(block)) => self.blocks.push_back(block), - Some(Err(err)) => return Err(err), - None => return Ok(None), - } - } - } -} - -/// Exploring drilldown operator. -pub struct DrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - essence: DrilldownIteratorEssence<'a, H, Number>, -} - -impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> -where - H::Out: Encode, -{ - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| { - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) - }) - } -} - -/// Proving drilldown iterator. -struct ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - essence: DrilldownIteratorEssence<'a, H, Number>, - proof_recorder: RefCell>, -} - -impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - /// Consume the iterator, extracting the gathered proof in lexicographical order - /// by value. 
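
Together, `key_changes_proof` and `key_changes_proof_check` form the full-node/light-client round trip that the tests below verify; sketched here with the same fixtures assumed:

    // Remote full node: drill down, recording every trie node that is touched.
    let proof = key_changes_proof::<BlakeTwo256, u64>(
        configuration_range(&remote_config, 0),
        &remote_storage,
        1,
        &AnchorBlockId { hash: Default::default(), number: 16 },
        16,
        None,
        &[42],
    )
    .unwrap();

    // Local light client: replay the drilldown against the proof alone;
    // only changes-trie roots are needed locally, not the tries themselves.
    let checked = key_changes_proof_check::<BlakeTwo256, u64>(
        configuration_range(&local_config, 0),
        &local_storage,
        proof,
        1,
        &AnchorBlockId { hash: Default::default(), number: 16 },
        16,
        None,
        &[42],
    );
    assert_eq!(checked, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)]));
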
- pub fn extract_proof(self) -> Vec> { - self.proof_recorder - .into_inner() - .drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect() - } -} - -impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a + Codec, -{ - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - let proof_recorder = &mut *self - .proof_recorder - .try_borrow_mut() - .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| { - ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder, - } - .storage(key) - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::{input::InputPair, storage::InMemoryStorage, Configuration}; - use sp_runtime::traits::BlakeTwo256; - use std::iter::FromIterator; - - fn child_key() -> PrefixedStorageKey { - let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); - child_info.prefixed_storage_key() - } - - fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs( - vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![]), - (2, vec![]), - ( - 3, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 3, key: vec![42] }, - vec![0], - )], - ), - (4, vec![InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3])]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - ( - 6, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 6, key: vec![42] }, - vec![3], - )], - ), - (7, vec![]), - ( - 8, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 8, key: vec![42] }, - vec![1, 2], - ), - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), - ], - ), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - ( - 16, - vec![InputPair::DigestIndex( - DigestIndex { block: 16, key: vec![42] }, - vec![4, 8], - )], - ), - ], - vec![( - child_key(), - vec![ - ( - 1, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 1, key: vec![42] }, - vec![0], - )], - ), - ( - 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 2, key: vec![42] }, - vec![3], - )], - ), - ( - 16, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16, key: vec![42] }, - vec![5], - ), - InputPair::DigestIndex( - DigestIndex { block: 16, key: vec![42] }, - vec![2], - ), - ], - ), - ], - )], - ); - - (config, backend) - } - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn drilldown_iterator_works() { - let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 2 }, - 4, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![])); - - let drilldown_result = key_changes::( - 
configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 3 }, - 4, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 7 }, - 7, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 7, - &AnchorBlockId { hash: Default::default(), number: 8 }, - 8, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 5, - &AnchorBlockId { hash: Default::default(), number: 7 }, - 8, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(6, 3)])); - } - - #[test] - fn drilldown_iterator_fails_when_storage_fails() { - let (config, storage) = prepare_for_drilldown(); - storage.clear_storage(); - - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 1000, - None, - &[42], - ) - .and_then(|i| i.collect::, _>>()) - .is_err()); - - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 1000, - Some(&child_key()), - &[42], - ) - .and_then(|i| i.collect::, _>>()) - .is_err()); - } - - #[test] - fn drilldown_iterator_fails_when_range_is_invalid() { - let (config, storage) = prepare_for_drilldown(); - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 50, - None, - &[42], - ) - .is_err()); - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 20, - &AnchorBlockId { hash: Default::default(), number: 10 }, - 100, - None, - &[42], - ) - .is_err()); - } - - #[test] - fn proving_drilldown_iterator_works() { - // happens on remote full node: - - // create drilldown iterator that records all trie nodes during drilldown - let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof = key_changes_proof::( - configuration_range(&remote_config, 0), - &remote_storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - None, - &[42], - ) - .unwrap(); - - let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof_child = key_changes_proof::( - configuration_range(&remote_config, 0), - &remote_storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - Some(&child_key()), - &[42], - ) - .unwrap(); - - // happens on local light node: - - // create drilldown iterator that works the same, but only depends on trie - let (local_config, local_storage) = prepare_for_drilldown(); - local_storage.clear_storage(); - let local_result = key_changes_proof_check::( - configuration_range(&local_config, 0), - &local_storage, - remote_proof, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - None, - &[42], - ); - - let (local_config, local_storage) = prepare_for_drilldown(); - local_storage.clear_storage(); - let local_result_child = key_changes_proof_check::( - configuration_range(&local_config, 0), - &local_storage, - remote_proof_child, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, 
- Some(&child_key()), - &[42], - ); - - // check that drilldown result is the same as if it was happening at the full node - assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - assert_eq!(local_result_child, Ok(vec![(16, 5), (2, 3)])); - } - - #[test] - fn drilldown_iterator_works_with_skewed_digest() { - let config = Configuration { digest_interval: 4, digest_levels: 3 }; - let mut config_range = configuration_range(&config, 0); - config_range.end = Some(91); - - // when 4^3 deactivates at block 91: - // last L3 digest has been created at block#64 - // skewed digest covers: - // L2 digests at blocks: 80 - // L1 digests at blocks: 84, 88 - // regular blocks: 89, 90, 91 - let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); - // changed at block#63 and covered by L3 digest at block#64 - input[63 - 1] - .1 - .push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); - input[64 - 1] - .1 - .push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); - // changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91 - input[79 - 1] - .1 - .push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1])); - input[80 - 1] - .1 - .push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79])); - input[91 - 1] - .1 - .push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); - let storage = InMemoryStorage::with_inputs(input, vec![]); - - let drilldown_result = key_changes::( - config_range, - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 91 }, - 100_000u64, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); - } -} diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs deleted file mode 100644 index af0a423e57267..0000000000000 --- a/primitives/state-machine/src/changes_trie/input.rs +++ /dev/null @@ -1,207 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Different types of changes trie input pairs. - -use crate::{changes_trie::BlockNumber, StorageKey, StorageValue}; -use codec::{Decode, Encode, Error, Input, Output}; -use sp_core::storage::PrefixedStorageKey; - -/// Key of { changed key => set of extrinsic indices } mapping. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ExtrinsicIndex { - /// Block at which this key has been inserted in the trie. - pub block: Number, - /// Storage key this node is responsible for. - pub key: StorageKey, -} - -/// Value of { changed key => set of extrinsic indices } mapping. -pub type ExtrinsicIndexValue = Vec; - -/// Key of { changed key => block/digest block numbers } mapping. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct DigestIndex { - /// Block at which this key has been inserted in the trie. - pub block: Number, - /// Storage key this node is responsible for. - pub key: StorageKey, -} - -/// Key of { childtrie key => Childchange trie } mapping. -#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] -pub struct ChildIndex { - /// Block at which this key has been inserted in the trie. - pub block: Number, - /// Storage key this node is responsible for. - pub storage_key: PrefixedStorageKey, -} - -/// Value of { changed key => block/digest block numbers } mapping. -pub type DigestIndexValue = Vec; - -/// Value of { changed key => block/digest block numbers } mapping. -/// That is the root of the child change trie. -pub type ChildIndexValue = Vec; - -/// Single input pair of changes trie. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum InputPair { - /// Element of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex, ExtrinsicIndexValue), - /// Element of { key => set of blocks/digest blocks where key has been changed } element - /// mapping. - DigestIndex(DigestIndex, DigestIndexValue), - /// Element of { childtrie key => Childchange trie } where key has been changed } element - /// mapping. - ChildIndex(ChildIndex, ChildIndexValue), -} - -/// Single input key of changes trie. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum InputKey { - /// Key of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex), - /// Key of { key => set of blocks/digest blocks where key has been changed } element mapping. - DigestIndex(DigestIndex), - /// Key of { childtrie key => Childchange trie } where key has been changed } element mapping. - ChildIndex(ChildIndex), -} - -impl InputPair { - /// Extract storage key that this pair corresponds to. 
- pub fn key(&self) -> Option<&[u8]> { - match *self { - InputPair::ExtrinsicIndex(ref key, _) => Some(&key.key), - InputPair::DigestIndex(ref key, _) => Some(&key.key), - InputPair::ChildIndex(_, _) => None, - } - } -} - -impl Into<(StorageKey, StorageValue)> for InputPair { - fn into(self) -> (StorageKey, StorageValue) { - match self { - InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), - InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), - InputPair::ChildIndex(key, value) => (key.encode(), value.encode()), - } - } -} - -impl Into> for InputPair { - fn into(self) -> InputKey { - match self { - InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key), - InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key), - InputPair::ChildIndex(key, _) => InputKey::ChildIndex(key), - } - } -} - -impl ExtrinsicIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![1]; - prefix.extend(block.encode()); - prefix - } -} - -impl Encode for ExtrinsicIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(1); - self.block.encode_to(dest); - self.key.encode_to(dest); - } -} - -impl codec::EncodeLike for ExtrinsicIndex {} - -impl DigestIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![2]; - prefix.extend(block.encode()); - prefix - } -} - -impl Encode for DigestIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(2); - self.block.encode_to(dest); - self.key.encode_to(dest); - } -} - -impl ChildIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![3]; - prefix.extend(block.encode()); - prefix - } -} - -impl Encode for ChildIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(3); - self.block.encode_to(dest); - self.storage_key.encode_to(dest); - } -} - -impl codec::EncodeLike for DigestIndex {} - -impl Decode for InputKey { - fn decode(input: &mut I) -> Result { - match input.read_byte()? { - 1 => Ok(InputKey::ExtrinsicIndex(ExtrinsicIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - 2 => Ok(InputKey::DigestIndex(DigestIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - 3 => Ok(InputKey::ChildIndex(ChildIndex { - block: Decode::decode(input)?, - storage_key: PrefixedStorageKey::new(Decode::decode(input)?), - })), - _ => Err("Invalid input key variant".into()), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn extrinsic_index_serialized_and_deserialized() { - let original = ExtrinsicIndex { block: 777u64, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::ExtrinsicIndex(original), deserialized); - } - - #[test] - fn digest_index_serialized_and_deserialized() { - let original = DigestIndex { block: 777u64, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::DigestIndex(original), deserialized); - } -} diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs deleted file mode 100644 index 40148095247dd..0000000000000 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ /dev/null @@ -1,428 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes trie related structures and functions. -//! -//! Changes trie is a trie built of { storage key => extrinsics } pairs -//! at the end of each block. For every changed storage key it contains -//! a pair, mapping key to the set of extrinsics where it has been changed. -//! -//! Optionally, every N blocks, additional level1-digest nodes are appended -//! to the changes trie, containing pairs { storage key => blocks }. For every -//! storage key that has been changed in PREVIOUS N-1 blocks (except for genesis -//! block) it contains a pair, mapping this key to the set of blocks where it -//! has been changed. -//! -//! Optionally, every N^digest_level (where digest_level > 1) blocks, additional -//! digest_level digest is created. It is built out of pairs { storage key => digest -//! block }, containing entries for every storage key that has been changed in -//! the last N*digest_level-1 blocks (except for genesis block), mapping these keys -//! to the set of lower-level digest blocks. -//! -//! Changes trie configuration could change within a time. The range of blocks, where -//! configuration has been active, is given by two blocks: zero and end. Zero block is -//! the block where configuration has been set. But the first changes trie that uses -//! this configuration will be built at the block zero+1. If configuration deactivates -//! at some block, this will be the end block of the configuration. It is also the -//! zero block of the next configuration. -//! -//! If configuration has the end block, it also means that 'skewed digest' has/should -//! been built at that block. If this is the block where max-level digest should have -//! been created, than it is simply max-level digest of this configuration. Otherwise, -//! it is the digest that covers all blocks since last max-level digest block was -//! created. -//! -//! Changes trie only contains the top level storage changes. Sub-level changes -//! are propagated through its storage root on the top level storage. 
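
For intuition about the layout described above, a minimal, self-contained sketch of the digest placement arithmetic follows. It assumes the configuration activates at block 0 and never deactivates (so no skewed digests), and it only illustrates the scheme; it is not the actual `sp_core::ChangesTrieConfiguration::digest_level_at_block` logic:

    /// Level of the digest built at `block` for a configuration active since
    /// block 0: level d > 0 iff `block` is a multiple of interval^d (capped
    /// at `levels`); plain blocks are level 0.
    fn digest_level_at(block: u64, interval: u64, levels: u32) -> u32 {
        if interval < 2 || block == 0 {
            return 0
        }
        let (mut level, mut step) = (0, interval);
        while level < levels && block % step == 0 {
            level += 1;
            step = match step.checked_mul(interval) {
                Some(s) => s,
                None => break,
            };
        }
        level
    }

    fn main() {
        // digest_interval = 4, digest_levels = 2:
        assert_eq!(digest_level_at(5, 4, 2), 0);  // regular block
        assert_eq!(digest_level_at(4, 4, 2), 1);  // L1 digest, points into blocks 1..=3
        assert_eq!(digest_level_at(16, 4, 2), 2); // L2 digest, points at L1 blocks 4, 8, 12
        assert_eq!(digest_level_at(20, 4, 2), 1); // first L1 digest after the L2 at 16
    }

A digest pair does not store extrinsic indices itself: it maps a changed key to the set of lower-level (digest) blocks where that change is recorded, which is what lets a reader drill down from the surface to the individual extrinsics.
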
- -mod build; -mod build_cache; -mod build_iterator; -mod changes_iterator; -mod input; -mod prune; -mod storage; -mod surface_iterator; - -pub use self::{ - build_cache::{BuildCache, CacheAction, CachedBuildData}, - changes_iterator::{ - key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, - }, - prune::prune, - storage::InMemoryStorage, -}; - -use crate::{ - backend::Backend, - changes_trie::{ - build::prepare_input, - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - }, - overlayed_changes::OverlayedChanges, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::{Hasher, Prefix}; -use num_traits::{One, Zero}; -use sp_core::{self, storage::PrefixedStorageKey}; -use sp_trie::{trie_types::TrieDBMut, DBValue, MemoryDB, TrieMut}; -use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, -}; - -/// Requirements for block number that can be used with changes tries. -pub trait BlockNumber: - Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -impl BlockNumber for T where - T: Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -/// Block identifier that could be used to determine fork of this block. -#[derive(Debug)] -pub struct AnchorBlockId { - /// Hash of this block. - pub hash: Hash, - /// Number of this block. - pub number: Number, -} - -/// Changes tries state at some block. -pub struct State<'a, H, Number> { - /// Configuration that is active at given block. - pub config: Configuration, - /// Configuration activation block number. Zero if it is the first configuration on the chain, - /// or number of the block that have emit NewConfiguration signal (thus activating - /// configuration starting from the **next** block). - pub zero: Number, - /// Underlying changes tries storage reference. - pub storage: &'a dyn Storage, -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait RootsStorage: Send + Sync { - /// Resolve hash of the block into anchor. - fn build_anchor(&self, hash: H::Out) -> Result, String>; - /// Get changes trie root for the block with given number which is an ancestor (or the block - /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root( - &self, - anchor: &AnchorBlockId, - block: Number, - ) -> Result, String>; -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait Storage: RootsStorage { - /// Casts from self reference to RootsStorage reference. - fn as_roots_storage(&self) -> &dyn RootsStorage; - /// Execute given functor with cached entry for given trie root. - /// Returns true if the functor has been called (cache entry exists) and false otherwise. - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool; - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; -} - -/// Changes trie storage -> trie backend essence adapter. 
-pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( - pub &'a dyn Storage, -); - -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage - for TrieBackendStorageAdapter<'a, H, N> -{ - type Overlay = sp_trie::MemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.0.get(key, prefix) - } -} - -/// Changes trie configuration. -pub type Configuration = sp_core::ChangesTrieConfiguration; - -/// Blocks range where configuration has been constant. -#[derive(Clone)] -pub struct ConfigurationRange<'a, N> { - /// Active configuration. - pub config: &'a Configuration, - /// Zero block of this configuration. The configuration is active starting from the next block. - pub zero: N, - /// End block of this configuration. It is the last block where configuration has been active. - pub end: Option, -} - -impl<'a, H, Number> State<'a, H, Number> { - /// Create state with given config and storage. - pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { - Self { config, zero, storage } - } -} - -impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { - fn clone(&self) -> Self { - State { config: self.config.clone(), zero: self.zero.clone(), storage: self.storage } - } -} - -/// Create state where changes tries are disabled. -pub fn disabled_state<'a, H, Number>() -> Option> { - None -} - -/// Compute the changes trie root and transaction for given block. -/// Returns Err(()) if unknown `parent_hash` has been passed. -/// Returns Ok(None) if there's no data to perform computation. -/// Panics if background storage returns an error OR if insert to MemoryDB fails. -pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( - backend: &B, - state: Option<&'a State<'a, H, Number>>, - changes: &OverlayedChanges, - parent_hash: H::Out, - panic_on_storage_error: bool, -) -> Result, H::Out, CacheAction)>, ()> -where - H::Out: Ord + 'static + Encode, -{ - /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. - fn maybe_panic( - res: std::result::Result, - panic: bool, - ) -> std::result::Result { - res.map(Ok).unwrap_or_else(|e| { - if panic { - panic!( - "changes trie: storage access is not allowed to fail within runtime: {:?}", - e - ) - } else { - Err(()) - } - }) - } - - // when storage isn't provided, changes tries aren't created - let state = match state { - Some(state) => state, - None => return Ok(None), - }; - - // build_anchor error should not be considered fatal - let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; - let block = parent.number.clone() + One::one(); - - // prepare configuration range - we already know zero block. 
Current block may be the end block - // if configuration has been changed in this block - let is_config_changed = - match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; - let config_range = ConfigurationRange { - config: &state.config, - zero: state.zero.clone(), - end: if is_config_changed { Some(block.clone()) } else { None }, - }; - - // storage errors are considered fatal (similar to situations when runtime fetches values from - // storage) - let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( - prepare_input::( - backend, - state.storage, - config_range.clone(), - changes, - &parent, - ), - panic_on_storage_error, - )?; - - // prepare cached data - let mut cache_action = prepare_cached_build_data(config_range, block.clone()); - let needs_changed_keys = cache_action.collects_changed_keys(); - cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); - - let mut mdb = MemoryDB::default(); - let mut child_roots = Vec::with_capacity(child_input_pairs.len()); - for (child_index, input_pairs) in child_input_pairs { - let mut not_empty = false; - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - not_empty = true; - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = - cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); - } - if not_empty { - child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); - } - } - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - for (key, value) in child_roots.into_iter().map(Into::into) { - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = cache_action.insert(None, storage_changed_keys); - } - - let cache_action = cache_action.complete(block, &root); - Ok(Some((mdb, root, cache_action))) -} - -/// Prepare empty cached build data for given block. 
-fn prepare_cached_build_data( - config: ConfigurationRange, - block: Number, -) -> IncompleteCacheAction { - // when digests are not enabled in configuration, we do not need to cache anything - // because it'll never be used again for building other tries - // => let's clear the cache - if !config.config.is_digest_build_enabled() { - return IncompleteCacheAction::Clear - } - - // when this is the last block where current configuration is active - // => let's clear the cache - if config.end.as_ref() == Some(&block) { - return IncompleteCacheAction::Clear - } - - // we do not need to cache anything when top-level digest trie is created, because - // it'll never be used again for building other tries - // => let's clear the cache - match config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) if digest_level == config.config.digest_levels => - IncompleteCacheAction::Clear, - _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn cache_is_cleared_when_digests_are_disabled() { - let config = Configuration { digest_interval: 0, digest_levels: 0 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 8u32), IncompleteCacheAction::Clear); - } - - #[test] - fn build_data_is_cached_when_digests_are_enabled() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys()); - } - - #[test] - fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 64u32), IncompleteCacheAction::Clear); - } - - #[test] - fn cache_is_cleared_when_end_block_of_configuration_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!( - prepare_cached_build_data(config_range.clone(), 4u32), - IncompleteCacheAction::Clear - ); - } -} diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs deleted file mode 100644 index 2ca540562b47f..0000000000000 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ /dev/null @@ -1,204 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Changes trie pruning-related functions. - -use crate::{ - changes_trie::{ - input::{ChildIndex, InputKey}, - storage::TrieBackendAdapter, - AnchorBlockId, BlockNumber, Storage, - }, - proving_backend::ProvingBackendRecorder, - trie_backend_essence::TrieBackendEssence, -}; -use codec::{Codec, Decode}; -use hash_db::Hasher; -use log::warn; -use num_traits::One; -use sp_trie::Recorder; - -/// Prune obsolete changes tries. Pruning happens at the same block, where highest -/// level digest is created. Pruning guarantees to save changes tries for last -/// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` -/// ranges. -pub fn prune( - storage: &dyn Storage, - first: Number, - last: Number, - current_block: &AnchorBlockId, - mut remove_trie_node: F, -) where - H::Out: Codec, -{ - // delete changes trie for every block in range - let mut block = first; - loop { - if block >= last.clone() + One::one() { - break - } - - let prev_block = block.clone(); - block += One::one(); - - let block = prev_block; - let root = match storage.root(current_block, block.clone()) { - Ok(Some(root)) => root, - Ok(None) => continue, - Err(error) => { - // try to delete other tries - warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); - continue - }, - }; - let children_roots = { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - root, - ); - let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); - let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { - if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut key) { - if let Ok(value) = >::decode(&mut value) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.push(trie_root); - } - } - }); - - children_roots - }; - for root in children_roots.into_iter() { - prune_trie(storage, root, &mut remove_trie_node); - } - - prune_trie(storage, root, &mut remove_trie_node); - } -} - -// Prune a trie. 
-fn prune_trie( - storage: &dyn Storage, - root: H::Out, - remove_trie_node: &mut F, -) where - H::Out: Codec, -{ - // enumerate all changes trie' keys, recording all nodes that have been 'touched' - // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); - { - let mut trie = ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder: &mut proof_recorder, - }; - trie.record_all_keys(); - } - - // all nodes of this changes trie should be pruned - remove_trie_node(root); - for node in proof_recorder.drain().into_iter().map(|n| n.hash) { - remove_trie_node(node); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{backend::insert_into_memory_db, changes_trie::storage::InMemoryStorage}; - use codec::Encode; - use sp_core::H256; - use sp_runtime::traits::BlakeTwo256; - use sp_trie::MemoryDB; - use std::collections::HashSet; - - fn prune_by_collect( - storage: &dyn Storage, - first: u64, - last: u64, - current_block: u64, - ) -> HashSet { - let mut pruned_trie_nodes = HashSet::new(); - let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(storage, first, last, &anchor, |node| { - pruned_trie_nodes.insert(node); - }); - pruned_trie_nodes - } - - #[test] - fn prune_works() { - fn prepare_storage() -> InMemoryStorage { - let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); - let child_key = - ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() } - .encode(); - let mut mdb1 = MemoryDB::::default(); - let root1 = - insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]) - .unwrap(); - let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::( - &mut mdb2, - vec![(vec![11], vec![21]), (vec![12], vec![22])], - ) - .unwrap(); - let mut mdb3 = MemoryDB::::default(); - let ch_root3 = - insert_into_memory_db::(&mut mdb3, vec![(vec![110], vec![120])]) - .unwrap(); - let root3 = insert_into_memory_db::( - &mut mdb3, - vec![ - (vec![13], vec![23]), - (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), - ], - ) - .unwrap(); - let mut mdb4 = MemoryDB::::default(); - let root4 = - insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]) - .unwrap(); - let storage = InMemoryStorage::new(); - storage.insert(65, root1, mdb1); - storage.insert(66, root2, mdb2); - storage.insert(67, root3, mdb3); - storage.insert(68, root4, mdb4); - - storage - } - - let storage = prepare_storage(); - assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_65 = prune_by_collect(&storage, 60, 65, 90); - assert!(!prune60_65.is_empty()); - storage.remove_from_storage(&prune60_65); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_70 = prune_by_collect(&storage, 60, 70, 90); - assert!(!prune60_70.is_empty()); - storage.remove_from_storage(&prune60_70); - assert!(storage.into_mdb().drain().is_empty()); - } -} diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs deleted file mode 100644 index bd5e3a32b5657..0000000000000 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ /dev/null @@ -1,214 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes trie storage utilities. - -use crate::{ - changes_trie::{AnchorBlockId, BlockNumber, BuildCache, RootsStorage, Storage}, - trie_backend_essence::TrieBackendStorage, - StorageKey, -}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use parking_lot::RwLock; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::{DBValue, MemoryDB}; -use std::collections::{BTreeMap, HashMap, HashSet}; - -#[cfg(test)] -use crate::backend::insert_into_memory_db; -#[cfg(test)] -use crate::changes_trie::input::{ChildIndex, InputPair}; - -/// In-memory implementation of changes trie storage. -pub struct InMemoryStorage { - data: RwLock>, - cache: BuildCache, -} - -/// Adapter for using changes trie storage as a TrieBackendEssence' storage. -pub struct TrieBackendAdapter<'a, H: Hasher, Number: BlockNumber> { - storage: &'a dyn Storage, - _hasher: std::marker::PhantomData<(H, Number)>, -} - -struct InMemoryStorageData { - roots: BTreeMap, - mdb: MemoryDB, -} - -impl InMemoryStorage { - /// Creates storage from given in-memory database. - pub fn with_db(mdb: MemoryDB) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { roots: BTreeMap::new(), mdb }), - cache: BuildCache::new(), - } - } - - /// Creates storage with empty database. - pub fn new() -> Self { - Self::with_db(Default::default()) - } - - /// Creates storage with given proof. - pub fn with_proof(proof: Vec>) -> Self { - use hash_db::HashDB; - - let mut proof_db = MemoryDB::::default(); - for item in proof { - proof_db.insert(EMPTY_PREFIX, &item); - } - Self::with_db(proof_db) - } - - /// Get mutable cache reference. - pub fn cache_mut(&mut self) -> &mut BuildCache { - &mut self.cache - } - - /// Create the storage with given blocks. 
- pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { - roots: blocks.into_iter().collect(), - mdb: MemoryDB::default(), - }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn with_inputs( - mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec>)>)>, - ) -> Self { - let mut mdb = MemoryDB::default(); - let mut roots = BTreeMap::new(); - for (storage_key, child_input) in children_inputs { - for (block, pairs) in child_input { - let root = - insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - - if let Some(root) = root { - let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { - ix - } else { - top_inputs.push((block.clone(), Default::default())); - top_inputs.len() - 1 - }; - top_inputs[ix].1.push(InputPair::ChildIndex( - ChildIndex { block: block.clone(), storage_key: storage_key.clone() }, - root.as_ref().to_vec(), - )); - } - } - } - - for (block, pairs) in top_inputs { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - if let Some(root) = root { - roots.insert(block, root); - } - } - - InMemoryStorage { - data: RwLock::new(InMemoryStorageData { roots, mdb }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct - } - - #[cfg(test)] - pub fn remove_from_storage(&self, keys: &HashSet) { - let mut data = self.data.write(); - for key in keys { - data.mdb.remove_and_purge(key, hash_db::EMPTY_PREFIX); - } - } - - #[cfg(test)] - pub fn into_mdb(self) -> MemoryDB { - self.data.into_inner().mdb - } - - /// Insert changes trie for given block. - pub fn insert(&self, block: Number, changes_trie_root: H::Out, trie: MemoryDB) { - let mut data = self.data.write(); - data.roots.insert(block, changes_trie_root); - data.mdb.consolidate(trie); - } -} - -impl RootsStorage for InMemoryStorage { - fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { - self.data - .read() - .roots - .iter() - .find(|(_, v)| **v == parent_hash) - .map(|(k, _)| AnchorBlockId { hash: parent_hash, number: k.clone() }) - .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) - } - - fn root( - &self, - _anchor_block: &AnchorBlockId, - block: Number, - ) -> Result, String> { - Ok(self.data.read().roots.get(&block).cloned()) - } -} - -impl Storage for InMemoryStorage { - fn as_roots_storage(&self) -> &dyn RootsStorage { - self - } - - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - self.cache.with_changed_keys(root, functor) - } - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) - } -} - -impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { - pub fn new(storage: &'a dyn Storage) -> Self { - Self { storage, _hasher: Default::default() } - } -} - -impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, -{ - type Overlay = MemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) - } -} diff --git a/primitives/state-machine/src/changes_trie/surface_iterator.rs b/primitives/state-machine/src/changes_trie/surface_iterator.rs deleted file mode 100644 index b3e5a490cd184..0000000000000 --- 
a/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ /dev/null @@ -1,326 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The best way to understand how this iterator works is to imagine some 2D terrain that have some -//! mountains (digest changes tries) and valleys (changes tries for regular blocks). There are gems -//! (blocks) beneath the terrain. Given the request to find all gems in the range [X1; X2] this -//! iterator will return **minimal set** of points at the terrain (mountains and valleys) inside -//! this range that have to be drilled down to search for gems. - -use crate::changes_trie::{BlockNumber, ConfigurationRange}; -use num_traits::One; - -/// Returns surface iterator for given range of blocks. -/// -/// `max` is the number of best block, known to caller. We can't access any changes tries -/// that are built after this block, even though we may have them built already. -pub fn surface_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = - lower_bound_max_digest(config.clone(), max.clone(), begin.clone(), end)?; - Ok(SurfaceIterator { - config, - begin, - max, - current: Some(current), - current_begin, - digest_step, - digest_level, - }) -} - -/// Surface iterator - only traverses top-level digests from given range and tries to find -/// all valid digest changes. -/// -/// Iterator item is the tuple of (last block of the current point + digest level of the current -/// point). Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest -/// block and None if it is skewed digest block. 
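
Before the iterator itself (defined just below), a rough sketch of the happy-path walk it performs. My simplification assumes the configuration activated at block 0 and never deactivated (no skewed digest), and that every top-level digest block in range is <= `max`; the real iterator also degrades to lower-level digests and plain blocks near `max`:

    /// Top-level surface points covering `begin..=end`. Illustrative only;
    /// mirrors the spirit of `surface_iterator`, not its implementation.
    fn surface_points(interval: u64, levels: u32, max: u64, begin: u64, end: u64) -> Vec<(u64, u32)> {
        let top_step = interval.pow(levels); // e.g. 4^2 = 16
        // First top-level digest block at or after `end`.
        let mut current = ((end + top_step - 1) / top_step) * top_step;
        let mut points = Vec::new();
        if current > max {
            return points // the real code falls back to lower levels here
        }
        while current >= begin && current > 0 {
            points.push((current, levels));
            if current < top_step {
                break
            }
            current -= top_step;
        }
        points
    }

    fn main() {
        // digest_interval = 4, digest_levels = 2, range 40..=180:
        let pts = surface_points(4, 2, 100_000, 40, 180);
        assert_eq!(pts.len(), 10);
        assert_eq!(pts.first(), Some(&(192, 2)));
        assert_eq!(pts.last(), Some(&(48, 2)));
    }

For these parameters the sketch reproduces the ten level-2 points (192 down to 48) that the `surface_iterator_works` test below expects; the skewed-digest and near-`max` cases are handled by `lower_bound_max_digest` below.
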
-pub struct SurfaceIterator<'a, Number: BlockNumber> { - config: ConfigurationRange<'a, Number>, - begin: Number, - max: Number, - current: Option, - current_begin: Number, - digest_step: u32, - digest_level: Option, -} - -impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { - type Item = Result<(Number, Option), String>; - - fn next(&mut self) -> Option { - let current = self.current.clone()?; - let digest_level = self.digest_level; - - if current < self.digest_step.into() { - self.current = None; - } else { - let next = current.clone() - self.digest_step.into(); - if next.is_zero() || next < self.begin { - self.current = None; - } else if next > self.current_begin { - self.current = Some(next); - } else { - let max_digest_interval = lower_bound_max_digest( - self.config.clone(), - self.max.clone(), - self.begin.clone(), - next, - ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval - { - Err(err) => return Some(Err(err)), - Ok(range) => range, - }; - - self.current = Some(current); - self.current_begin = current_begin; - self.digest_step = digest_step; - self.digest_level = digest_level; - } - } - - Some(Ok((current, digest_level))) - } -} - -/// Returns parameters of highest level digest block that includes the end of given range -/// and tends to include the whole range. -fn lower_bound_max_digest<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result<(Number, Number, u32, Option), String> { - if end > max || begin > end { - return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)) - } - if begin <= config.zero || - config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) - { - return Err(format!( - "changes trie range is not covered by configuration: {}..{}/{}..{}", - begin, - end, - config.zero, - match config.end.as_ref() { - Some(config_end) => format!("{}", config_end), - None => "None".into(), - } - )) - } - - let mut digest_level = 0u32; - let mut digest_step = 1u32; - let mut digest_interval = 0u32; - let mut current = end.clone(); - let mut current_begin = begin.clone(); - if current_begin != current { - while digest_level != config.config.digest_levels { - // try to use next level digest - let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * config.config.digest_interval; - let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { - 1 - } else { - digest_interval - } - }; - let new_digest_begin = config.zero.clone() + - ((current.clone() - One::one() - config.zero.clone()) / - new_digest_interval.into()) * - new_digest_interval.into(); - let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); - let new_current = new_digest_begin.clone() + new_digest_interval.into(); - - // check if we met skewed digest - if let Some(skewed_digest_end) = config.end.as_ref() { - if new_digest_end > *skewed_digest_end { - let skewed_digest_start = config.config.prev_max_level_digest_block( - config.zero.clone(), - skewed_digest_end.clone(), - ); - if let Some(skewed_digest_start) = skewed_digest_start { - let skewed_digest_range = (skewed_digest_end.clone() - - skewed_digest_start.clone()) - .try_into() - .ok() - .expect( - "skewed digest range is always <= max level digest range;\ - max level digest range always fits u32; qed", - ); - return Ok(( - skewed_digest_end.clone(), - skewed_digest_start, - skewed_digest_range, - None, - )) - } - } - } - - // we can't 
use next level digest if it touches any unknown (> max) blocks - if new_digest_end > max { - if begin < new_digest_begin { - current_begin = new_digest_begin; - } - break - } - - // we can (and will) use this digest - digest_level = new_digest_level; - digest_step = new_digest_step; - digest_interval = new_digest_interval; - current = new_current; - current_begin = new_digest_begin; - - // if current digest covers the whole range => no need to use next level digest - if current_begin <= begin && new_digest_end >= end { - break - } - } - } - - Ok((current, current_begin, digest_step, Some(digest_level))) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::Configuration; - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn lower_bound_max_digest_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64) - .unwrap(), - (192, 176, 16, Some(2)), - ); - - // when config activates at 30 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64) - .unwrap(), - (222, 206, 16, Some(2)), - ); - } - - #[test] - fn surface_iterator_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64,) - .unwrap() - .collect::>(), - vec![ - Ok((192, Some(2))), - Ok((176, Some(2))), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - - // when config activates at 30 - assert_eq!( - surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64,) - .unwrap() - .collect::>(), - vec![ - Ok((190, Some(2))), - Ok((174, Some(2))), - Ok((158, Some(2))), - Ok((142, Some(2))), - Ok((126, Some(2))), - Ok((110, Some(2))), - Ok((94, Some(2))), - Ok((78, Some(2))), - Ok((62, Some(2))), - Ok((46, Some(2))), - ], - ); - - // when config activates at 0 AND max block is before next digest - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64) - .unwrap() - .collect::>(), - vec![ - Ok((183, Some(0))), - Ok((182, Some(0))), - Ok((181, Some(0))), - Ok((180, Some(1))), - Ok((176, Some(2))), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - } - - #[test] - fn surface_iterator_works_with_skewed_digest() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let mut config_range = configuration_range(&config, 0u64); - - // when config activates at 0 AND ends at 170 - config_range.end = Some(170); - assert_eq!( - surface_iterator(config_range, 100_000u64, 40u64, 170u64) - .unwrap() - .collect::>(), - vec![ - Ok((170, None)), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - } -} diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c20d8492fb1f3..8f914ab3eee64 100644 --- a/primitives/state-machine/src/ext.rs +++ 
b/primitives/state-machine/src/ext.rs @@ -28,8 +28,6 @@ use sp_core::storage::{well_known_keys::is_child_storage_key, ChildInfo, Tracked use sp_externalities::{Extension, ExtensionStore, Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; -#[cfg(feature = "std")] -use crate::changes_trie::State as ChangesTrieState; use crate::{log_error, trace, warn, StorageTransactionCache}; use sp_std::{ any::{Any, TypeId}, @@ -90,62 +88,52 @@ impl error::Error for Error { } /// Wraps a read-only backend, call executor, and current overlayed changes. -pub struct Ext<'a, H, N, B> +pub struct Ext<'a, H, B> where H: Hasher, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, /// The storage backend to read from. backend: &'a B, /// The cache for the storage transactions. - storage_transaction_cache: &'a mut StorageTransactionCache, - /// Changes trie state to read from. - #[cfg(feature = "std")] - changes_trie_state: Option>, + storage_transaction_cache: &'a mut StorageTransactionCache, /// Pseudo-unique id used for tracing. pub id: u16, - /// Dummy usage of N arg. - _phantom: sp_std::marker::PhantomData, /// Extensions registered with this instance. #[cfg(feature = "std")] extensions: Option>, } -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, B: Backend, - N: crate::changes_trie::BlockNumber, { /// Create a new `Ext`. #[cfg(not(feature = "std"))] pub fn new( overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, + storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, ) -> Self { - Ext { overlay, backend, id: 0, storage_transaction_cache, _phantom: Default::default() } + Ext { overlay, backend, id: 0, storage_transaction_cache } } /// Create a new `Ext` from overlayed changes and read-only backend #[cfg(feature = "std")] pub fn new( overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, + storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, - changes_trie_state: Option>, extensions: Option<&'a mut sp_externalities::Extensions>, ) -> Self { Self { overlay, backend, - changes_trie_state, storage_transaction_cache, id: rand::random(), - _phantom: Default::default(), extensions: extensions.map(OverlayedExtensions::new), } } @@ -159,12 +147,11 @@ where } #[cfg(test)] -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; @@ -181,12 +168,11 @@ where } } -impl<'a, H, N, B> Externalities for Ext<'a, H, N, B> +impl<'a, H, B> Externalities for Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { self.overlay.set_offchain_storage(key, value) @@ -644,54 +630,6 @@ where .add_transaction_index(IndexOperation::Renew { extrinsic: index, hash: hash.to_vec() }); } - #[cfg(not(feature = "std"))] - fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result>, ()> { - Ok(None) - } - - #[cfg(feature = "std")] - fn storage_changes_root(&mut self, mut parent_hash: &[u8]) -> Result>, ()> { - let _guard = guard(); - if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root - { - trace!( - 
target: "state", - method = "ChangesRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - parent_hash = %HexDisplay::from(&parent_hash), - ?root, - cached = true, - ); - - Ok(Some(root.encode())) - } else { - let root = self.overlay.changes_trie_root( - self.backend, - self.changes_trie_state.as_ref(), - Decode::decode(&mut parent_hash).map_err(|e| { - trace!( - target: "state", - error = %e, - "Failed to decode changes root parent hash", - ) - })?, - true, - self.storage_transaction_cache, - ); - - trace!( - target: "state", - method = "ChangesRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - parent_hash = %HexDisplay::from(&parent_hash), - ?root, - cached = false, - ); - - root.map(|r| r.map(|o| o.encode())) - } - } - fn storage_start_transaction(&mut self) { self.overlay.start_transaction() } @@ -710,13 +648,7 @@ where self.overlay.rollback_transaction().expect(BENCHMARKING_FN); } self.overlay - .drain_storage_changes( - self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ) + .drain_storage_changes(self.backend, Default::default(), self.storage_transaction_cache) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); @@ -731,13 +663,7 @@ where } let changes = self .overlay - .drain_storage_changes( - self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ) + .drain_storage_changes(self.backend, Default::default(), self.storage_transaction_cache) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend .commit( @@ -778,12 +704,11 @@ where } } -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn limit_remove_from_backend( &mut self, @@ -869,12 +794,11 @@ impl<'a> StorageAppend<'a> { } #[cfg(not(feature = "std"))] -impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> +impl<'a, H, B> ExtensionStore for Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { None @@ -897,11 +821,10 @@ where } #[cfg(feature = "std")] -impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> +impl<'a, H, B> ExtensionStore for Ext<'a, H, B> where H: Hasher, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { self.extensions.as_mut().and_then(|exts| exts.get_mut(type_id)) @@ -938,86 +861,16 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{ - changes_trie::{ - Configuration as ChangesTrieConfiguration, InMemoryStorage as TestChangesTrieStorage, - }, - InMemoryBackend, - }; + use crate::InMemoryBackend; use codec::Encode; - use hex_literal::hex; - use num_traits::Zero; use sp_core::{ map, - storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, - Blake2Hasher, H256, + storage::{Storage, StorageChild}, + Blake2Hasher, }; type TestBackend = InMemoryBackend; - type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - - fn prepare_overlay_with_changes() -> OverlayedChanges { - let mut changes = OverlayedChanges::default(); - changes.set_collect_extrinsics(true); - changes.set_extrinsic_index(1); - changes.set_storage(vec![1], Some(vec![100])); - changes.set_storage(EXTRINSIC_INDEX.to_vec(), Some(3u32.encode())); - changes.set_offchain_storage(b"k1", Some(b"v1")); - 
changes.set_offchain_storage(b"k2", Some(b"v2")); - changes - } - - fn changes_trie_config() -> ChangesTrieConfiguration { - ChangesTrieConfiguration { digest_interval: 0, digest_levels: 0 } - } - - #[test] - fn storage_changes_root_is_none_when_storage_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_none_when_state_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), - ); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - overlay.set_collect_extrinsics(false); - overlay.set_storage(vec![1], None); - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), - ); - } + type TestExt<'a> = Ext<'a, Blake2Hasher, TestBackend>; #[test] fn next_storage_key_works() { @@ -1035,7 +888,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -1051,7 +904,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -1079,7 +932,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -1110,7 +963,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_backend < next_overlay 
assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -1126,7 +979,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -1155,7 +1008,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1192,7 +1045,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); use sp_core::storage::well_known_keys; let mut ext = ext; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index b0178021f3130..f7477e232bc66 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,8 +23,6 @@ pub mod backend; #[cfg(feature = "std")] mod basic; -#[cfg(feature = "std")] -mod changes_trie; mod error; mod ext; #[cfg(feature = "std")] @@ -140,28 +138,10 @@ pub use crate::{ }; pub use error::{Error, ExecutionError}; -#[cfg(not(feature = "std"))] -mod changes_trie { - /// Stub for change trie block number until - /// change trie move to no_std. - pub trait BlockNumber {} - - impl BlockNumber for N {} -} - #[cfg(feature = "std")] mod std_reexport { pub use crate::{ basic::BasicExternalities, - changes_trie::{ - disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, - AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, - BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, - State as ChangesTrieState, Storage as ChangesTrieStorage, - }, error::{Error, ExecutionError}, in_memory_backend::new_in_mem, proving_backend::{ @@ -205,10 +185,6 @@ mod execution { /// Default handler of the execution manager. pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; - /// Type of changes trie transaction. - pub type ChangesTrieTransaction = - (MemoryDB, ChangesTrieCacheAction<::Out, N>); - /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; @@ -308,11 +284,10 @@ mod execution { } /// The substrate state machine. - pub struct StateMachine<'a, B, H, N, Exec> + pub struct StateMachine<'a, B, H, Exec> where H: Hasher, B: Backend, - N: ChangesTrieBlockNumber, { backend: &'a B, exec: &'a Exec, @@ -320,8 +295,7 @@ mod execution { call_data: &'a [u8], overlay: &'a mut OverlayedChanges, extensions: Extensions, - changes_trie_state: Option>, - storage_transaction_cache: Option<&'a mut StorageTransactionCache>, + storage_transaction_cache: Option<&'a mut StorageTransactionCache>, runtime_code: &'a RuntimeCode<'a>, stats: StateMachineStats, /// The hash of the block the state machine will be executed on. 
@@ -330,29 +304,26 @@ mod execution { parent_hash: Option, } - impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> + impl<'a, B, H, Exec> Drop for StateMachine<'a, B, H, Exec> where H: Hasher, B: Backend, - N: ChangesTrieBlockNumber, { fn drop(&mut self) { self.backend.register_overlay_stats(&self.stats); } } - impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> + impl<'a, B, H, Exec> StateMachine<'a, B, H, Exec> where H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, B: Backend, - N: crate::changes_trie::BlockNumber, { /// Creates new substrate state machine. pub fn new( backend: &'a B, - changes_trie_state: Option>, overlay: &'a mut OverlayedChanges, exec: &'a Exec, method: &'a str, @@ -371,7 +342,6 @@ mod execution { call_data, extensions, overlay, - changes_trie_state, storage_transaction_cache: None, runtime_code, stats: StateMachineStats::default(), @@ -386,7 +356,7 @@ mod execution { /// build that will be cached. pub fn with_storage_transaction_cache( mut self, - cache: Option<&'a mut StorageTransactionCache>, + cache: Option<&'a mut StorageTransactionCache>, ) -> Self { self.storage_transaction_cache = cache; self @@ -439,13 +409,7 @@ mod execution { .enter_runtime() .expect("StateMachine is never called from the runtime; qed"); - let mut ext = Ext::new( - self.overlay, - cache, - self.backend, - self.changes_trie_state.clone(), - Some(&mut self.extensions), - ); + let mut ext = Ext::new(self.overlay, cache, self.backend, Some(&mut self.extensions)); let ext_id = ext.id; @@ -562,9 +526,6 @@ mod execution { CallResult, ) -> CallResult, { - let changes_tries_enabled = self.changes_trie_state.is_some(); - self.overlay.set_collect_extrinsics(changes_tries_enabled); - let result = { match manager { ExecutionManager::Both(on_consensus_failure) => self @@ -588,7 +549,7 @@ mod execution { } /// Prove execution using the given state backend, overlayed changes, and call executor. - pub fn prove_execution( + pub fn prove_execution( backend: &mut B, overlay: &mut OverlayedChanges, exec: &Exec, @@ -602,13 +563,12 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let trie_backend = backend .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _, _>( + prove_execution_on_trie_backend::<_, _, _, _>( trie_backend, overlay, exec, @@ -628,7 +588,7 @@ mod execution { /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. - pub fn prove_execution_on_trie_backend( + pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, @@ -642,13 +602,11 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, N, Exec>::new( + let mut sm = StateMachine::<_, H, Exec>::new( &proving_backend, - None, overlay, exec, method, @@ -667,7 +625,7 @@ mod execution { } /// Check execution proof, generated by `prove_execution` call. 
- pub fn execution_proof_check( + pub fn execution_proof_check( root: H::Out, proof: StorageProof, overlay: &mut OverlayedChanges, @@ -681,11 +639,10 @@ mod execution { H: Hasher, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _, _>( + execution_proof_check_on_trie_backend::<_, _, _>( &trie_backend, overlay, exec, @@ -697,7 +654,7 @@ mod execution { } /// Check execution proof on proving backend, generated by `prove_execution` call. - pub fn execution_proof_check_on_trie_backend( + pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, @@ -710,12 +667,10 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut sm = StateMachine::<_, H, N, Exec>::new( + let mut sm = StateMachine::<_, H, Exec>::new( trie_backend, - None, overlay, exec, method, @@ -1390,7 +1345,7 @@ mod execution { #[cfg(test)] mod tests { - use super::{changes_trie::Configuration as ChangesTrieConfig, ext::Ext, *}; + use super::{ext::Ext, *}; use crate::execution::CallResult; use codec::{Decode, Encode}; use sp_core::{ @@ -1409,7 +1364,6 @@ mod tests { #[derive(Clone)] struct DummyCodeExecutor { - change_changes_trie_config: bool, native_available: bool, native_succeeds: bool, fallback_succeeds: bool, @@ -1430,13 +1384,6 @@ mod tests { use_native: bool, native_call: Option, ) -> (CallResult, bool) { - if self.change_changes_trie_config { - ext.place_storage( - sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some(ChangesTrieConfig { digest_interval: 777, digest_levels: 333 }.encode()), - ); - } - let using_native = use_native && self.native_available; match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { (true, true, _, Some(call)) => { @@ -1472,10 +1419,8 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1498,10 +1443,8 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1525,10 +1468,8 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: false, @@ -1555,7 +1496,6 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { let executor = DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1564,7 +1504,7 @@ mod tests { // fetch execution proof from 'remote' full node let mut remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( + let (remote_result, remote_proof) = prove_execution( &mut remote_backend, 
&mut Default::default(), &executor, @@ -1576,7 +1516,7 @@ mod tests { .unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), @@ -1614,13 +1554,7 @@ mod tests { let overlay_limit = overlay.clone(); { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.clear_prefix(b"ab", None); } overlay.commit_transaction().unwrap(); @@ -1644,13 +1578,7 @@ mod tests { let mut overlay = overlay_limit; { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); } overlay.commit_transaction().unwrap(); @@ -1692,13 +1620,7 @@ mod tests { { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); } @@ -1733,13 +1655,7 @@ mod tests { let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1)); assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); @@ -1758,13 +1674,7 @@ mod tests { let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); @@ -1781,26 +1691,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.storage_append(key.clone(), reference_data[0].encode()); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } overlay.start_transaction(); { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); for i in reference_data.iter().skip(1) { ext.storage_append(key.clone(), i.encode()); @@ -1809,13 +1707,7 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let ext = Ext::new(&mut overlay, &mut cache, backend, None); 
assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } } @@ -1837,13 +1729,7 @@ mod tests { // For example, block initialization with event. { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.clear_storage(key.as_slice()); ext.storage_append(key.clone(), Item::InitializationItem.encode()); } @@ -1851,13 +1737,7 @@ mod tests { // For example, first transaction resulted in panic during block building { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1872,13 +1752,7 @@ mod tests { // Then we apply next transaction which is valid this time. { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1893,13 +1767,7 @@ mod tests { // Then only initlaization item and second (committed) item should persist. { - let ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), @@ -2214,13 +2082,7 @@ mod tests { let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); @@ -2257,13 +2119,7 @@ mod tests { { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(b"bbb"), Some(vec![])); assert_eq!(ext.storage(b"ccc"), Some(vec![])); ext.clear_storage(b"ccc"); @@ -2286,10 +2142,8 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: false, @@ -2301,7 +2155,7 @@ mod tests { TaskExecutor::new(), ); - let run_state_machine = |state_machine: &mut StateMachine<_, _, _, _>| { + let run_state_machine = |state_machine: &mut StateMachine<_, _, _>| { state_machine .execute_using_consensus_failure_handler:: _, _, _>( ExecutionManager::NativeWhenPossible, diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 1ffd569e2828b..818b7be99bc6e 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -69,7 +69,6 @@ struct InnerValue { /// Current value. None if value has been deleted. 
value: V, /// The set of extrinsic indices where the values has been changed. - /// Is filled only if runtime has announced changes trie support. extrinsics: Extrinsics, } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index cf7af1c9a6f3a..b7a535792aae6 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -21,12 +21,7 @@ mod changeset; mod offchain; use self::changeset::OverlayedChangeSet; -use crate::{backend::Backend, changes_trie::BlockNumber, stats::StateMachineStats, DefaultError}; -#[cfg(feature = "std")] -use crate::{ - changes_trie::{build_changes_trie, State as ChangesTrieState}, - ChangesTrieTransaction, -}; +use crate::{backend::Backend, stats::StateMachineStats, DefaultError}; use codec::{Decode, Encode}; use hash_db::Hasher; pub use offchain::OffchainOverlayedChanges; @@ -134,7 +129,7 @@ pub enum IndexOperation { /// /// This contains all the changes to the storage and transactions to apply theses changes to the /// backend. -pub struct StorageChanges { +pub struct StorageChanges { /// All changes to the main storage. /// /// A value of `None` means that it was deleted. @@ -150,22 +145,13 @@ pub struct StorageChanges { pub transaction: Transaction, /// The storage root after applying the transaction. pub transaction_storage_root: H::Out, - /// Contains the transaction for the backend for the changes trie. - /// - /// If changes trie is disabled the value is set to `None`. - #[cfg(feature = "std")] - pub changes_trie_transaction: Option>, - /// Phantom data for block number until change trie support no_std. - #[cfg(not(feature = "std"))] - pub _ph: sp_std::marker::PhantomData, - /// Changes to the transaction index, #[cfg(feature = "std")] pub transaction_index_changes: Vec, } #[cfg(feature = "std")] -impl StorageChanges { +impl StorageChanges { /// Deconstruct into the inner values pub fn into_inner( self, @@ -175,7 +161,6 @@ impl StorageChanges { OffchainChangesCollection, Transaction, H::Out, - Option>, Vec, ) { ( @@ -184,58 +169,35 @@ impl StorageChanges { self.offchain_storage_changes, self.transaction, self.transaction_storage_root, - self.changes_trie_transaction, self.transaction_index_changes, ) } } -/// The storage transaction are calculated as part of the `storage_root` and -/// `changes_trie_storage_root`. These transactions can be reused for importing the block into the +/// Storage transactions are calculated as part of the `storage_root`. +/// These transactions can be reused for importing the block into the /// storage. So, we cache them to not require a recomputation of those transactions. -pub struct StorageTransactionCache { +pub struct StorageTransactionCache { /// Contains the changes for the main and the child storages as one transaction. pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, - /// Contains the changes trie transaction. - #[cfg(feature = "std")] - pub(crate) changes_trie_transaction: Option>>, - /// The storage root after applying the changes trie transaction. - #[cfg(feature = "std")] - pub(crate) changes_trie_transaction_storage_root: Option>, - /// Phantom data for block number until change trie support no_std. - #[cfg(not(feature = "std"))] - pub(crate) _ph: sp_std::marker::PhantomData, } -impl StorageTransactionCache { +impl StorageTransactionCache { /// Reset the cached transactions. 
pub fn reset(&mut self) { *self = Self::default(); } } -impl Default - for StorageTransactionCache -{ +impl Default for StorageTransactionCache { fn default() -> Self { - Self { - transaction: None, - transaction_storage_root: None, - #[cfg(feature = "std")] - changes_trie_transaction: None, - #[cfg(feature = "std")] - changes_trie_transaction_storage_root: None, - #[cfg(not(feature = "std"))] - _ph: Default::default(), - } + Self { transaction: None, transaction_storage_root: None } } } -impl Default - for StorageChanges -{ +impl Default for StorageChanges { fn default() -> Self { Self { main_storage_changes: Default::default(), @@ -244,10 +206,6 @@ impl Default transaction: Default::default(), transaction_storage_root: Default::default(), #[cfg(feature = "std")] - changes_trie_transaction: None, - #[cfg(not(feature = "std"))] - _ph: Default::default(), - #[cfg(feature = "std")] transaction_index_changes: Default::default(), } } @@ -539,27 +497,25 @@ impl OverlayedChanges { /// Convert this instance with all changes into a [`StorageChanges`] instance. #[cfg(feature = "std")] - pub fn into_storage_changes, H: Hasher, N: BlockNumber>( + pub fn into_storage_changes, H: Hasher>( mut self, backend: &B, - changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, - mut cache: StorageTransactionCache, - ) -> Result, DefaultError> + mut cache: StorageTransactionCache, + ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { - self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) + self.drain_storage_changes(backend, parent_hash, &mut cache) } /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. - pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( + pub fn drain_storage_changes, H: Hasher>( &mut self, backend: &B, - #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, - parent_hash: H::Out, - mut cache: &mut StorageTransactionCache, - ) -> Result, DefaultError> + _parent_hash: H::Out, + mut cache: &mut StorageTransactionCache, + ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { @@ -574,21 +530,6 @@ impl OverlayedChanges { .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) .expect("Transaction was be generated as part of `storage_root`; qed"); - // If the transaction does not exist, we generate it. - #[cfg(feature = "std")] - if cache.changes_trie_transaction.is_none() { - self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) - .map_err(|_| "Failed to generate changes trie transaction")?; - } - #[cfg(not(feature = "std"))] - let _ = parent_hash; - - #[cfg(feature = "std")] - let changes_trie_transaction = cache - .changes_trie_transaction - .take() - .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); - let (main_storage_changes, child_storage_changes) = self.drain_committed(); let offchain_storage_changes = self.offchain_drain_committed().collect(); @@ -604,11 +545,7 @@ impl OverlayedChanges { transaction, transaction_storage_root, #[cfg(feature = "std")] - changes_trie_transaction, - #[cfg(feature = "std")] transaction_index_changes, - #[cfg(not(feature = "std"))] - _ph: Default::default(), }) } @@ -639,10 +576,10 @@ impl OverlayedChanges { /// as seen by the current transaction. /// /// Returns the storage root and caches storage transaction in the given `cache`. 
- pub fn storage_root>( + pub fn storage_root>( &self, backend: &B, - cache: &mut StorageTransactionCache, + cache: &mut StorageTransactionCache, ) -> H::Out where H::Out: Ord + Encode, @@ -660,40 +597,6 @@ impl OverlayedChanges { root } - /// Generate the changes trie root. - /// - /// Returns the changes trie root and caches the storage transaction into the given `cache`. - /// - /// # Panics - /// - /// Panics on storage error, when `panic_on_storage_error` is set. - #[cfg(feature = "std")] - pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( - &self, - backend: &B, - changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, - parent_hash: H::Out, - panic_on_storage_error: bool, - cache: &mut StorageTransactionCache, - ) -> Result, ()> - where - H::Out: Ord + Encode + 'static, - { - build_changes_trie::<_, H, N>( - backend, - changes_trie_state, - self, - parent_hash, - panic_on_storage_error, - ) - .map(|r| { - let root = r.as_ref().map(|r| r.1).clone(); - cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); - cache.changes_trie_transaction_storage_root = Some(root); - root - }) - } - /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value. pub fn iter_after(&self, key: &[u8]) -> impl Iterator { @@ -937,7 +840,6 @@ mod tests { .collect(); let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); - overlay.set_collect_extrinsics(false); overlay.start_transaction(); overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())); @@ -950,13 +852,7 @@ mod tests { overlay.set_storage(b"doug".to_vec(), None); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - crate::changes_trie::disabled_state::<_, u64>(), - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 5b7d568b0311e..b3e43d4c46e7f 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -153,10 +153,6 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - unimplemented!("storage_changes_root is not supported in ReadOnlyExternalities") - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by ReadOnlyExternalities"); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 23f66ee14d87e..59a0a5a6837ec 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -23,21 +23,15 @@ use std::{ }; use crate::{ - backend::Backend, - changes_trie::{ - BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, - InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, - }, - ext::Ext, - InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, + backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, + StorageTransactionCache, StorageValue, }; -use codec::Decode; use hash_db::Hasher; use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ - 
well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, + well_known_keys::{is_child_storage_key, CODE}, Storage, }, testing::TaskExecutor, @@ -46,7 +40,7 @@ use sp_core::{ use sp_externalities::{Extension, ExtensionStore, Extensions}; /// Simple HashMap-based Externalities impl. -pub struct TestExternalities +pub struct TestExternalities where H::Out: codec::Codec + Ord, { @@ -54,33 +48,23 @@ where overlay: OverlayedChanges, offchain_db: TestPersistentOffchainDB, storage_transaction_cache: - StorageTransactionCache< as Backend>::Transaction, H, N>, + StorageTransactionCache< as Backend>::Transaction, H>, /// Storage backend. pub backend: InMemoryBackend, - changes_trie_config: Option, - changes_trie_storage: ChangesTrieInMemoryStorage, /// Extensions. pub extensions: Extensions, } -impl TestExternalities +impl TestExternalities where H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. - pub fn ext(&mut self) -> Ext> { + pub fn ext(&mut self) -> Ext> { Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, &self.backend, - match self.changes_trie_config.clone() { - Some(config) => Some(ChangesTrieState { - config, - zero: 0.into(), - storage: &self.changes_trie_storage, - }), - None => None, - }, Some(&mut self.extensions), ) } @@ -97,12 +81,7 @@ where /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { - let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage - .top - .get(CHANGES_TRIE_CONFIG) - .and_then(|v| Decode::decode(&mut &v[..]).ok()); - overlay.set_collect_extrinsics(changes_trie_config.is_some()); + let overlay = OverlayedChanges::default(); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); @@ -117,9 +96,7 @@ where TestExternalities { overlay, offchain_db, - changes_trie_config, extensions, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), backend: storage.into(), storage_transaction_cache: Default::default(), } @@ -150,11 +127,6 @@ where self.extensions.register(ext); } - /// Get mutable reference to changes trie storage. - pub fn changes_trie_storage(&mut self) -> &mut ChangesTrieInMemoryStorage { - &mut self.changes_trie_storage - } - /// Return a new backend with all pending changes. /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open @@ -180,9 +152,8 @@ where /// /// This will panic if there are still open transactions. 
pub fn commit_all(&mut self) -> Result<(), String> { - let changes = self.overlay.drain_storage_changes::<_, _, N>( + let changes = self.overlay.drain_storage_changes::<_, _>( &self.backend, - None, Default::default(), &mut Default::default(), )?; @@ -216,7 +187,7 @@ where } } -impl std::fmt::Debug for TestExternalities +impl std::fmt::Debug for TestExternalities where H::Out: Ord + codec::Codec, { @@ -225,18 +196,18 @@ where } } -impl PartialEq for TestExternalities +impl PartialEq for TestExternalities where H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contains the /// same data at this state - fn eq(&self, other: &TestExternalities) -> bool { + fn eq(&self, other: &TestExternalities) -> bool { self.as_backend().eq(&other.as_backend()) } } -impl Default for TestExternalities +impl Default for TestExternalities where H::Out: Ord + 'static + codec::Codec, { @@ -245,7 +216,7 @@ where } } -impl From for TestExternalities +impl From for TestExternalities where H::Out: Ord + 'static + codec::Codec, { @@ -254,11 +225,10 @@ where } } -impl sp_externalities::ExtensionStore for TestExternalities +impl sp_externalities::ExtensionStore for TestExternalities where H: Hasher, H::Out: Ord + codec::Codec, - N: ChangesTrieBlockNumber, { fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { self.extensions.get_mut(type_id) @@ -284,11 +254,10 @@ where } } -impl sp_externalities::ExternalitiesExt for TestExternalities +impl sp_externalities::ExternalitiesExt for TestExternalities where H: Hasher, H::Out: Ord + codec::Codec, - N: ChangesTrieBlockNumber, { fn extension(&mut self) -> Option<&mut T> { self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) @@ -312,7 +281,7 @@ mod tests { #[test] fn commit_should_work() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); @@ -324,7 +293,7 @@ mod tests { #[test] fn set_and_retrieve_code() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); let code = vec![1, 2, 3]; @@ -336,12 +305,12 @@ mod tests { #[test] fn check_send() { fn assert_send() {} - assert_send::>(); + assert_send::>(); } #[test] fn commit_all_and_kill_child_storage() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let child_info = ChildInfo::new_default(&b"test_child"[..]); { @@ -366,7 +335,7 @@ mod tests { #[test] fn as_backend_generates_same_backend_as_commit_all() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); { let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 45474a44693ab..1144e258e0e28 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -204,9 +204,6 @@ pub mod well_known_keys { /// Current extrinsic index (u32) is stored under this key. pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; - /// Changes trie configuration is stored under this key. - pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; - /// Prefix of child storage keys. 
pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 975a81af4f53d..59733490a18ff 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -134,10 +134,6 @@ impl Externalities for AsyncExternalities { panic!("`child_storage_root`: should not be used in async externalities!") } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - panic!("`storage_changes_root`: should not be used in async externalities!") - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by AsyncExternalities"); } diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index d988160b1dc7b..341839a1deb20 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -73,15 +73,10 @@ pub type BlockNumber = u64; /// Index of a transaction. pub type Index = u64; /// The item of a block digest. -pub type DigestItem = sp_runtime::generic::DigestItem; +pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. -pub type Digest = sp_runtime::generic::Digest; +pub type Digest = sp_runtime::generic::Digest; /// A test block. pub type Block = sp_runtime::generic::Block; /// A test block's header. pub type Header = sp_runtime::generic::Header; - -/// Changes trie configuration (optionally) used in tests. -pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 } -} diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index a8b2e8f57ac52..1277863c94f73 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -23,7 +23,6 @@ sc-client-db = { version = "0.10.0-dev", features = [ ], path = "../../client/db" } sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } -sc-light = { version = "4.0.0-dev", path = "../../client/light" } sc-offchain = { version = "4.0.0-dev", path = "../../client/offchain" } sc-service = { version = "0.10.0-dev", default-features = false, features = [ "test-helpers", diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 9bc411af5d3ed..2d1cb4bbc66a2 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -45,21 +45,13 @@ use sc_client_api::BlockchainEvents; use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::storage::ChildInfo; -use sp_runtime::{ - codec::Encode, - traits::{BlakeTwo256, Block as BlockT}, - OpaqueExtrinsic, -}; +use sp_runtime::{codec::Encode, traits::Block as BlockT, OpaqueExtrinsic}; use std::{ collections::{HashMap, HashSet}, pin::Pin, sync::Arc, }; -/// Test client light database backend. -pub type LightBackend = - sc_light::Backend, BlakeTwo256>; - /// A genesis storage initialization trait. pub trait GenesisInit: Default { /// Construct genesis storage. 
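The caller-facing effect of the changes-trie removal, as a minimal sketch (not part of this patch; it mirrors the `commit_should_work` test in `primitives/state-machine/src/testing.rs` above, and the test name below is illustrative): `TestExternalities` is now generic over the hasher alone, and the `Ext` it hands out is constructed without any changes-trie state.

    use sp_core::Blake2Hasher;
    use sp_externalities::Externalities;
    use sp_state_machine::TestExternalities;

    #[test]
    fn changes_trie_free_externalities() {
        // Single type parameter: the old `N: ChangesTrieBlockNumber` is gone.
        let mut t = TestExternalities::<Blake2Hasher>::default();
        // `ext()` builds the inner `Ext` from four arguments
        // (overlay, transaction cache, backend, extensions) instead of five.
        let mut ext = t.ext();
        ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec());
        assert_eq!(ext.storage(b"doe"), Some(b"reindeer".to_vec()));
    }
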
diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index fbc6aefdb850c..e5cba3a3399a1 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,7 +12,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "4.0.0-dev", path = "../../../client/light" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index e8c1d2ac5cd48..4519dce65960d 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -19,7 +19,6 @@ use sc_client_api::backend; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_core::ChangesTrieConfiguration; use sc_block_builder::BlockBuilderApi; @@ -36,11 +35,6 @@ pub trait BlockBuilderExt { key: Vec, value: Option>, ) -> Result<(), sp_blockchain::Error>; - /// Add changes trie configuration update extrinsic to the block. - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error>; } impl<'a, A, B> BlockBuilderExt @@ -68,11 +62,4 @@ where ) -> Result<(), sp_blockchain::Error> { self.push(substrate_test_runtime::Extrinsic::StorageChange(key, value)) } - - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error> { - self.push(substrate_test_runtime::Extrinsic::ChangesTrieConfigUpdate(new_config)) - } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index da92e0f37983c..2948e918cdf8b 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -24,22 +24,18 @@ pub mod trait_tests; mod block_builder_ext; pub use sc_consensus::LongestChain; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; pub use self::block_builder_ext::BlockBuilderExt; -use sc_client_api::light::{ - Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, - RemoteReadChildRequest, RemoteReadRequest, -}; use sp_core::{ sr25519, storage::{ChildInfo, Storage, StorageChild}, - ChangesTrieConfiguration, Pair, + Pair, }; -use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. @@ -51,8 +47,8 @@ pub mod prelude { }; // Client structs pub use super::{ - Backend, ExecutorDispatch, LightBackend, LightExecutor, LocalExecutorDispatch, - NativeElseWasmExecutor, TestClient, TestClientBuilder, WasmExecutionMethod, + Backend, ExecutorDispatch, LocalExecutorDispatch, NativeElseWasmExecutor, TestClient, + TestClientBuilder, WasmExecutionMethod, }; // Keyring pub use super::{AccountKeyring, Sr25519Keyring}; @@ -84,26 +80,9 @@ pub type ExecutorDispatch = client::LocalCallExecutor< NativeElseWasmExecutor, >; -/// Test client light database backend. -pub type LightBackend = substrate_test_client::LightBackend; - -/// Test client light executor. 
-pub type LightExecutor = sc_light::GenesisCallExecutor< - LightBackend, - client::LocalCallExecutor< - substrate_test_runtime::Block, - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor, - >, - NativeElseWasmExecutor, - >, ->; - /// Parameters of test-client builder with test-runtime. #[derive(Default)] pub struct GenesisParameters { - changes_trie_config: Option, heap_pages_override: Option, extra_storage: Storage, wasm_code: Option>, @@ -112,7 +91,6 @@ pub struct GenesisParameters { impl GenesisParameters { fn genesis_config(&self) -> GenesisConfig { GenesisConfig::new( - self.changes_trie_config.clone(), vec![ sr25519::Public::from(Sr25519Keyring::Alice).into(), sr25519::Public::from(Sr25519Keyring::Bob).into(), @@ -215,12 +193,6 @@ pub trait TestClientBuilderExt: Sized { /// Returns a mutable reference to the genesis parameters. fn genesis_init_mut(&mut self) -> &mut GenesisParameters; - /// Set changes trie configuration for genesis. - fn changes_trie_config(mut self, config: Option) -> Self { - self.genesis_init_mut().changes_trie_config = config; - self - } - /// Override the default value for Wasm heap pages. fn set_heap_pages(mut self, heap_pages: u64) -> Self { self.genesis_init_mut().heap_pages_override = Some(heap_pages); @@ -308,142 +280,11 @@ impl TestClientBuilderExt } } -/// Type of optional fetch callback. -type MaybeFetcherCallback = - Option Result + Send + Sync>>; - -/// Type of fetcher future result. -type FetcherFutureResult = futures::future::Ready>; - -/// Implementation of light client fetcher used in tests. -#[derive(Default)] -pub struct LightFetcher { - call: MaybeFetcherCallback, Vec>, - body: MaybeFetcherCallback< - RemoteBodyRequest, - Vec, - >, -} - -impl LightFetcher { - /// Sets remote call callback. - pub fn with_remote_call( - self, - call: MaybeFetcherCallback, Vec>, - ) -> Self { - LightFetcher { call, body: self.body } - } - - /// Sets remote body callback. - pub fn with_remote_body( - self, - body: MaybeFetcherCallback< - RemoteBodyRequest, - Vec, - >, - ) -> Self { - LightFetcher { call: self.call, body } - } -} - -impl Fetcher for LightFetcher { - type RemoteHeaderResult = FetcherFutureResult; - type RemoteReadResult = FetcherFutureResult, Option>>>; - type RemoteCallResult = FetcherFutureResult>; - type RemoteChangesResult = - FetcherFutureResult, u32)>>; - type RemoteBodyResult = FetcherFutureResult>; - - fn remote_header( - &self, - _: RemoteHeaderRequest, - ) -> Self::RemoteHeaderResult { - unimplemented!() - } - - fn remote_read( - &self, - _: RemoteReadRequest, - ) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_read_child( - &self, - _: RemoteReadChildRequest, - ) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_call( - &self, - req: RemoteCallRequest, - ) -> Self::RemoteCallResult { - match self.call { - Some(ref call) => futures::future::ready(call(req)), - None => unimplemented!(), - } - } - - fn remote_changes( - &self, - _: RemoteChangesRequest, - ) -> Self::RemoteChangesResult { - unimplemented!() - } - - fn remote_body( - &self, - req: RemoteBodyRequest, - ) -> Self::RemoteBodyResult { - match self.body { - Some(ref body) => futures::future::ready(body(req)), - None => unimplemented!(), - } - } -} - /// Creates new client instance used for tests. pub fn new() -> Client { TestClientBuilder::new().build() } -/// Creates new light client instance used for tests. 
-pub fn new_light() -> ( - client::Client< - LightBackend, - LightExecutor, - substrate_test_runtime::Block, - substrate_test_runtime::RuntimeApi, - >, - Arc, -) { - let storage = sc_client_db::light::LightStorage::new_test(); - let blockchain = Arc::new(sc_light::Blockchain::new(storage)); - let backend = Arc::new(LightBackend::new(blockchain)); - let executor = new_native_executor(); - let local_call_executor = client::LocalCallExecutor::new( - backend.clone(), - executor, - Box::new(sp_core::testing::TaskExecutor::new()), - Default::default(), - ) - .expect("Creates LocalCallExecutor"); - let call_executor = LightExecutor::new(backend.clone(), local_call_executor); - - ( - TestClientBuilder::with_backend(backend.clone()) - .build_with_executor(call_executor) - .0, - backend, - ) -} - -/// Creates new light client fetcher used for tests. -pub fn new_light_fetcher() -> LightFetcher { - LightFetcher::default() -} - /// Create a new native executor. pub fn new_native_executor() -> sc_executor::NativeElseWasmExecutor { sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index a8801b8519dfe..a06d9f310fb04 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -23,7 +23,6 @@ use sc_service::client::genesis; use sp_core::{ map, storage::{well_known_keys, Storage}, - ChangesTrieConfiguration, }; use sp_io::hashing::{blake2_256, twox_128}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; @@ -31,7 +30,6 @@ use std::collections::BTreeMap; /// Configuration of a general Substrate test genesis block. pub struct GenesisConfig { - changes_trie_config: Option, authorities: Vec, balances: Vec<(AccountId, u64)>, heap_pages_override: Option, @@ -41,7 +39,6 @@ pub struct GenesisConfig { impl GenesisConfig { pub fn new( - changes_trie_config: Option, authorities: Vec, endowed_accounts: Vec, balance: u64, @@ -49,7 +46,6 @@ impl GenesisConfig { extra_storage: Storage, ) -> Self { GenesisConfig { - changes_trie_config, authorities, balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, @@ -77,9 +73,6 @@ impl GenesisConfig { .into_iter(), ) .collect(); - if let Some(ref changes_trie_config) = self.changes_trie_config { - map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); - } map.insert(twox_128(&b"sys:auth"[..])[..].to_vec(), self.authorities.encode()); // Add the extra storage entries. 
map.extend(self.extra_storage.top.clone().into_iter()); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 943c41c247f75..08863de510d09 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -28,7 +28,7 @@ use scale_info::TypeInfo; use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; -use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; +use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; use sp_trie::{ trie_types::{TrieDB, TrieDBMut}, PrefixedMemoryDB, StorageProof, @@ -161,7 +161,6 @@ pub enum Extrinsic { }, IncludeData(Vec), StorageChange(Vec, Option>), - ChangesTrieConfigUpdate(Option), OffchainIndexSet(Vec, Vec), OffchainIndexClear(Vec), Store(Vec), @@ -197,8 +196,6 @@ impl BlindCheckable for Extrinsic { }, Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), - Extrinsic::ChangesTrieConfigUpdate(new_config) => - Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), @@ -265,9 +262,9 @@ pub type BlockNumber = u64; /// Index of a transaction. pub type Index = u64; /// The item of a block digest. -pub type DigestItem = sp_runtime::generic::DigestItem; +pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. -pub type Digest = sp_runtime::generic::Digest; +pub type Digest = sp_runtime::generic::Digest; /// A test block. pub type Block = sp_runtime::generic::Block; /// A test block's header. @@ -1264,15 +1261,13 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { let db: sp_trie::MemoryDB = proof.into_memory_db(); let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let mut overlay = sp_state_machine::OverlayedChanges::default(); - let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); + let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, &mut cache, &backend, #[cfg(feature = "std")] None, - #[cfg(feature = "std")] - None, ); assert!(ext.storage(b"value3").is_some()); assert!(ext.storage_root().as_slice() == &root[..]); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 334569d055a0c..165fe0355628e 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -24,12 +24,8 @@ use crate::{ use codec::{Decode, Encode, KeyedVec}; use frame_support::{decl_module, decl_storage, storage}; use frame_system::Config; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; -use sp_io::{ - hashing::blake2_256, - storage::{changes_root as storage_changes_root, root as storage_root}, - trie, -}; +use sp_core::storage::well_known_keys; +use sp_io::{hashing::blake2_256, storage::root as storage_root, trie}; use sp_runtime::{ generic, traits::Header as _, @@ -54,7 +50,6 @@ decl_storage! 
{ Number get(fn number): Option; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option>; - NewChangesTrieConfig get(fn new_changes_trie_config): Option>; StorageDigest get(fn storage_digest): Option; Authorities get(fn authorities) config(): Vec; } @@ -207,30 +202,17 @@ pub fn finalize_block() -> Header { let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = ::take(); - let new_changes_trie_config = ::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); - let storage_changes_root = storage_changes_root(&parent_hash.encode()) - .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); - - if let Some(storage_changes_root) = storage_changes_root { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); - } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } - if let Some(new_config) = new_changes_trie_config { - digest.push(generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(new_config), - )); - } - Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } @@ -251,8 +233,6 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), - Extrinsic::ChangesTrieConfigUpdate(ref new_config) => - execute_changes_trie_config_update(new_config.clone()), Extrinsic::OffchainIndexSet(key, value) => { sp_io::offchain_index::set(&key, &value); Ok(Ok(())) @@ -311,18 +291,6 @@ fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicRes Ok(Ok(())) } -fn execute_changes_trie_config_update( - new_config: Option, -) -> ApplyExtrinsicResult { - match new_config.clone() { - Some(new_config) => - storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), - None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), - } - ::put(new_config); - Ok(Ok(())) -} - #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs index 27c04c40fe6fe..c68a33056c163 100644 --- a/test-utils/test-runner/src/client.rs +++ b/test-utils/test-runner/src/client.rs @@ -159,7 +159,6 @@ where transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: None, }; @@ -195,14 +194,12 @@ where backend: backend.clone(), task_manager: &mut task_manager, keystore: keystore.sync_keystore(), - on_demand: None, transaction_pool: transaction_pool.clone(), rpc_extensions_builder: Box::new(move |_, _| { let mut io = jsonrpc_core::IoHandler::default(); io.extend_with(ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone()))); Ok(io) }), - remote_blockchain: None, network, system_rpc_tx, telemetry: None, diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 9114013b747f7..07259263c5e4d 100644 --- 
a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -25,10 +25,7 @@ use futures::{ }; use jsonrpc_core::MetaIoHandler; use manual_seal::EngineCommand; -use sc_client_api::{ - backend::{self, Backend}, - CallExecutor, ExecutorProvider, -}; +use sc_client_api::{backend::Backend, CallExecutor, ExecutorProvider}; use sc_executor::NativeElseWasmExecutor; use sc_service::{TFullBackend, TFullCallExecutor, TFullClient, TaskManager}; use sc_transaction_pool_api::TransactionPool; @@ -160,9 +157,6 @@ where { let id = BlockId::Hash(self.client.info().best_hash); let mut overlay = OverlayedChanges::default(); - let changes_trie = - backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()) - .unwrap(); let mut cache = StorageTransactionCache::< T::Block, as Backend>::State, @@ -176,13 +170,7 @@ where .state_at(id.clone()) .expect(&format!("State at block {} not found", id)); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &state_backend, - changes_trie.clone(), - Some(&mut extensions), - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &state_backend, Some(&mut extensions)); sp_externalities::set_and_run_with_externalities(&mut ext, closure) } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 5efa970d93580..48d05dd8dda9c 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -33,7 +33,7 @@ use sp_core::offchain::{ }; use sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_state_machine::StateMachine; use std::{fmt::Debug, sync::Arc, time}; @@ -152,9 +152,8 @@ impl BenchmarkCmd { // Get Benchmark List let state = &state_without_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, - None, &mut changes, &executor, "Benchmark_benchmark_metadata", @@ -243,9 +242,8 @@ impl BenchmarkCmd { if !self.no_verify { // Dont use these results since verification code will add overhead let state = &state_without_tracking; - let _results = StateMachine::<_, _, NumberFor, _>::new( + let _results = StateMachine::new( state, - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", @@ -270,9 +268,8 @@ impl BenchmarkCmd { // Do one loop of DB tracking. { let state = &state_with_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, // todo remove tracking - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", @@ -303,9 +300,8 @@ impl BenchmarkCmd { // Finally run a bunch of loops to get extrinsic timing information. 
for r in 0..self.external_repeat { let state = &state_without_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, // todo remove tracking - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index f0f37f0b20675..41a5f3ba0eb3e 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -20,14 +20,13 @@ use std::sync::Arc; use codec::{Codec, Decode, Encode}; -use futures::{future::ready, FutureExt, TryFutureExt}; +use futures::FutureExt; use jsonrpc_core::{Error as RpcError, ErrorCode}; use jsonrpc_derive::rpc; -use sc_client_api::light::{future_header, Fetcher, RemoteBlockchain, RemoteCallRequest}; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_blockchain::HeaderBackend; use sp_core::{hexdisplay::HexDisplay, Bytes}; use sp_runtime::{generic::BlockId, traits}; @@ -154,90 +153,6 @@ where } } -/// An implementation of System-specific RPC methods on light client. -pub struct LightSystem { - client: Arc, - remote_blockchain: Arc>, - fetcher: Arc, - pool: Arc

, -} - -impl LightSystem { - /// Create new `LightSystem`. - pub fn new( - client: Arc, - remote_blockchain: Arc>, - fetcher: Arc, - pool: Arc

, - ) -> Self { - LightSystem { client, remote_blockchain, fetcher, pool } - } -} - -impl SystemApi<::Hash, AccountId, Index> - for LightSystem -where - P: TransactionPool + 'static, - C: HeaderBackend, - C: Send + Sync + 'static, - F: Fetcher + 'static, - Block: traits::Block, - AccountId: Clone + std::fmt::Display + Codec + Send + 'static, - Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, -{ - fn nonce(&self, account: AccountId) -> FutureResult { - let best_hash = self.client.info().best_hash; - let best_id = BlockId::hash(best_hash); - let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); - let fetcher = self.fetcher.clone(); - let call_data = account.encode(); - let future_best_header = future_best_header.and_then(move |maybe_best_header| { - ready( - maybe_best_header - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", best_hash))), - ) - }); - - let future_nonce = future_best_header.and_then(move |best_header| { - fetcher.remote_call(RemoteCallRequest { - block: best_hash, - header: best_header, - method: "AccountNonceApi_account_nonce".into(), - call_data, - retry_count: None, - }) - }); - - let future_nonce = future_nonce.and_then(|nonce| async move { - Index::decode(&mut &nonce[..]) - .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e)) - }); - let future_nonce = future_nonce.map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query nonce.".into(), - data: Some(format!("{:?}", e).into()), - }); - - let pool = self.pool.clone(); - future_nonce.map_ok(move |nonce| adjust_nonce(&*pool, account, nonce)).boxed() - } - - fn dry_run( - &self, - _extrinsic: Bytes, - _at: Option<::Hash>, - ) -> FutureResult { - async { - Err(RpcError { - code: ErrorCode::MethodNotFound, - message: "Unable to dry run extrinsic.".into(), - data: None, - }) - } - .boxed() - } -} - /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
fn adjust_nonce(pool: &P, account: AccountId, nonce: Index) -> Index diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 27fb35dd7a46a..70f177dc1f869 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -152,12 +152,7 @@ where .map_err(|e| format!("failed to decode output: {:?}", e))?; let storage_changes = changes - .drain_storage_changes::<_, _, NumberFor>( - &state_ext.backend, - None, - Default::default(), - &mut Default::default(), - ) + .drain_storage_changes(&state_ext.backend, Default::default(), &mut Default::default()) .unwrap(); state_ext.backend.apply_transaction( storage_changes.transaction_storage_root, diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index a74625492bb1c..4aa1c213703ed 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -680,9 +680,8 @@ pub(crate) fn state_machine_call sc_cli::Result<(OverlayedChanges, Vec)> { let mut changes = Default::default(); - let encoded_results = StateMachine::<_, _, NumberFor, _>::new( + let encoded_results = StateMachine::new( &ext.backend, - None, &mut changes, executor, method, From 9e8c7b80e9c167dc81244cdae0097d463c1b2e66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Nov 2021 14:42:26 +0100 Subject: [PATCH 089/162] Bump rustversion from 1.0.4 to 1.0.5 (#10243) Bumps [rustversion](https://github.com/dtolnay/rustversion) from 1.0.4 to 1.0.5. - [Release notes](https://github.com/dtolnay/rustversion/releases) - [Commits](https://github.com/dtolnay/rustversion/compare/1.0.4...1.0.5) --- updated-dependencies: - dependency-name: rustversion dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- frame/support/test/Cargo.toml | 2 +- primitives/api/test/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74504d5f1547b..998072f2f9327 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7510,9 +7510,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" +checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" [[package]] name = "rw-stream-sink" diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 762c85f75c363..09a1538e2741d 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -25,7 +25,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../pri sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } trybuild = "1.0.52" pretty_assertions = "1.0.0" -rustversion = "1.0.0" +rustversion = "1.0.5" frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message test-pallet = { package = "frame-support-test-pallet", default-features = false, path = "pallet" } diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 8c274b386470a..dccc123142dac 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -22,7 +22,7 @@ sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-build codec = { package = "parity-scale-codec", version = "2.0.0" } sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } trybuild = "1.0.52" -rustversion = "1.0.0" +rustversion = "1.0.5" [dev-dependencies] criterion = "0.3.0" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index c640f02824f24..dd93b92b9be4e 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -30,7 +30,7 @@ sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } sp-core = { version = "4.0.0-dev", path = "../core" } sp-io = { version = "4.0.0-dev", path = "../io" } -rustversion = "1.0.0" +rustversion = "1.0.5" trybuild = "1.0.52" [features] From 3f3cc90c06a1e0a7a93dbd5b660a475e9b4a783e Mon Sep 17 00:00:00 2001 From: Sergei Shulepov Date: Fri, 12 Nov 2021 20:38:26 +0100 Subject: [PATCH 090/162] Update requirement for pwasm-utils (#10250) --- client/executor/common/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 276fe9a8380b4..1bb057fabad84 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" -pwasm-utils = "0.18.0" +pwasm-utils = "0.18.2" codec = { package = "parity-scale-codec", version = "2.0.0" } wasmi = "0.9.1" sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } From 54699ec8220fd13fa353f1846db0c923a0fdc310 Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Nov 2021 00:32:44 +0000 Subject: [PATCH 091/162] Bump hex-literal from 0.3.3 to 0.3.4 (#10246) Bumps [hex-literal](https://github.com/RustCrypto/utils) from 0.3.3 to 0.3.4. - [Release notes](https://github.com/RustCrypto/utils/releases) - [Commits](https://github.com/RustCrypto/utils/compare/hex-literal-v0.3.3...hex-literal-v0.3.4) --- updated-dependencies: - dependency-name: hex-literal dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- bin/node-template/runtime/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- frame/transaction-storage/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/state-machine/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 998072f2f9327..c90cf1cf52861 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2598,9 +2598,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" +checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" [[package]] name = "hex_fmt" diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index e4e2039866b46..bff47f5a7ea39 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -45,7 +45,7 @@ pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-fe # Used for runtime benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -hex-literal = { version = "0.3.3", optional = true } +hex-literal = { version = "0.3.4", optional = true } pallet-template = { version = "3.0.0", default-features = false, path = "../pallets/template" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index fc39e47ce4113..4e29c7c5314e2 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -37,7 +37,7 @@ crate-type = ["cdylib", "rlib"] codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0.126", features = ["derive"] } futures = "0.3.16" -hex-literal = "0.3.3" +hex-literal = "0.3.4" log = "0.4.8" rand = "0.7.2" structopt = { version = "0.3.8", optional = true } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index c0b888e55b1f6..d086b6f12d590 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "2.2.0", default-features = ] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } static_assertions = "1.1.0" -hex-literal = { version = "0.3.3", optional = true } +hex-literal = { version = "0.3.4", optional = true } log = { version = "0.4.14", default-features = false } # 
primitives diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 54c4a91d72805..4a8e678105437 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -37,7 +37,7 @@ sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../../primitives/c [dev-dependencies] wat = "1.0" -hex-literal = "0.3.3" +hex-literal = "0.3.4" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 7694e0f6893d6..114473b66c975 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] hex = "0.4" -hex-literal = "0.3.3" +hex-literal = "0.3.4" tempfile = "3.1.0" tokio = { version = "1.10.0", features = ["time"] } log = "0.4.8" diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index fdc386978dee5..a6bfcf2a552fc 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -28,7 +28,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys log = { version = "0.4.14", default-features = false } [dev-dependencies] -hex-literal = "0.3.3" +hex-literal = "0.3.4" [features] default = ["std"] diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 52d2f41cb1e34..20e31fe0a5720 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -26,7 +26,7 @@ sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primiti sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } [dev-dependencies] -hex-literal = "0.3.3" +hex-literal = "0.3.4" sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index 7167ae7424571..a8c6e60af2f82 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.126", optional = true } -hex-literal = { version = "0.3.3", optional = true } +hex-literal = { version = "0.3.4", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index eaf1d26c2898c..dc8904f33e0c1 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -73,7 +73,7 @@ sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = [dev-dependencies] sp-serializer = { version = "4.0.0-dev", path = "../serializer" } -hex-literal = "0.3.3" +hex-literal = "0.3.4" rand = "0.7.2" criterion = "0.3.3" serde_json = "1.0" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 783837a6442b6..0133ea17370a8 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -32,7 +32,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } 
tracing = { version = "0.1.29", optional = true } [dev-dependencies] -hex-literal = "0.3.3" +hex-literal = "0.3.4" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } pretty_assertions = "1.0.0" rand = "0.7.2" diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 6190df210403c..8eaeb424781bf 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } trie-bench = "0.28.0" trie-standardmap = "0.15.2" criterion = "0.3.3" -hex-literal = "0.3.3" +hex-literal = "0.3.4" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } [features] From b01bd3162dd3909f9eba33ba0e63ab074f26d53f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Nov 2021 02:35:07 +0000 Subject: [PATCH 092/162] Bump structopt from 0.3.23 to 0.3.25 (#10253) * Bump structopt from 0.3.23 to 0.3.25 Bumps [structopt](https://github.com/TeXitoi/structopt) from 0.3.23 to 0.3.25. - [Release notes](https://github.com/TeXitoi/structopt/releases) - [Changelog](https://github.com/TeXitoi/structopt/blob/master/CHANGELOG.md) - [Commits](https://github.com/TeXitoi/structopt/compare/v0.3.23...v0.3.25) --- updated-dependencies: - dependency-name: structopt dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * cargo fmt Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Keith Yeung --- Cargo.lock | 8 ++++---- bin/node-template/node/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 4 ++-- bin/utils/chain-spec-builder/Cargo.toml | 2 +- bin/utils/subkey/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/babe/src/verification.rs | 4 ++-- client/network/src/protocol/notifications/behaviour.rs | 4 ++-- client/network/src/protocol/sync/blocks.rs | 2 +- client/network/src/service/tests.rs | 2 +- client/network/src/transactions.rs | 4 ++-- frame/bags-list/remote-tests/Cargo.toml | 2 +- frame/election-provider-multi-phase/src/lib.rs | 2 +- .../support/procedural/src/pallet/parse/pallet_struct.rs | 4 ++-- primitives/npos-elections/fuzzer/Cargo.toml | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- utils/frame/frame-utilities-cli/Cargo.toml | 2 +- utils/frame/generate-bags/Cargo.toml | 2 +- utils/frame/generate-bags/node-runtime/Cargo.toml | 2 +- utils/frame/try-runtime/cli/Cargo.toml | 2 +- 20 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c90cf1cf52861..e41a6c867177f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10010,9 +10010,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structopt" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa" +checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" dependencies = [ "clap", "lazy_static", @@ -10021,9 +10021,9 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.16" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck", "proc-macro-error", diff --git a/bin/node-template/node/Cargo.toml 
b/bin/node-template/node/Cargo.toml index 1d8abad406a14..6f3a63593aa3e 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -structopt = "0.3.8" +structopt = "0.3.25" sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4e29c7c5314e2..374015a3426c9 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -40,7 +40,7 @@ futures = "0.3.16" hex-literal = "0.3.4" log = "0.4.8" rand = "0.7.2" -structopt = { version = "0.3.8", optional = true } +structopt = { version = "0.3.25", optional = true } # primitives sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } @@ -136,7 +136,7 @@ remote-externalities = { path = "../../../utils/frame/remote-externalities" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } [build-dependencies] -structopt = { version = "0.3.8", optional = true } +structopt = { version = "0.3.25", optional = true } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index a35fbba5cdc46..daffd5a0359a5 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -21,4 +21,4 @@ node-cli = { version = "3.0.0-dev", path = "../../node/cli" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } rand = "0.7.2" -structopt = "0.3.8" +structopt = "0.3.25" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 14ba673b33be2..d3c38edb5771d 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -18,4 +18,4 @@ name = "subkey" [dependencies] sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } -structopt = "0.3.14" +structopt = "0.3.25" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 9dc7ff730600b..b027063b109a8 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -38,7 +38,7 @@ sc-service = { version = "0.10.0-dev", default-features = false, path = "../serv sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } names = { version = "0.12.0", default-features = false } -structopt = "0.3.8" +structopt = "0.3.25" sc-tracing = { version = "4.0.0-dev", path = "../tracing" } chrono = "0.4.10" serde = "1.0.126" diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 174b2d03c6ef0..2322a96262161 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -111,7 +111,7 @@ pub(super) fn check_header( ); check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; - } + }, PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { @@ -122,7 +122,7 @@ pub(super) fn check_header( ); check_secondary_vrf_header::(pre_hash, 
secondary, sig, &epoch)?; - } + }, _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 01138e3207570..f66f1fbe9e95a 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -712,7 +712,7 @@ impl Notifications { timer: delay_id, timer_deadline: *backoff, }; - } + }, // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { @@ -2085,7 +2085,7 @@ impl NetworkBehaviour for Notifications { .boxed(), ); } - } + }, // We intentionally never remove elements from `delays`, and it may // thus contain obsolete entries. This is a normal situation. diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 30ba7ffafeffc..ce4535dc0b45f 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -203,7 +203,7 @@ impl BlockCollection { { *downloading -= 1; false - } + }, Some(&mut BlockRangeState::Downloading { .. }) => true, _ => false, }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 1c66986e422fc..8271da886fca7 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -529,7 +529,7 @@ fn fallback_name_working() { { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break - } + }, _ => {}, }; } diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 99350f603a375..6d190651160f0 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -336,13 +336,13 @@ impl TransactionsHandler { }, ); debug_assert!(_was_in.is_none()); - } + }, Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { let _peer = self.peers.remove(&remote); debug_assert!(_peer.is_some()); - } + }, Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml index 37f351f0d27ef..ecc0b4da242c7 100644 --- a/frame/bags-list/remote-tests/Cargo.toml +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -33,5 +33,5 @@ remote-externalities = { path = "../../../utils/frame/remote-externalities", ver # others tokio = { version = "1", features = ["macros"] } log = "0.4.14" -structopt = "0.3.23" +structopt = "0.3.25" clap = "2.33.3" diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 80a13aa99fb70..4c4de82af592f 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -772,7 +772,7 @@ pub mod pallet { Self::on_initialize_open_unsigned(enabled, now); T::WeightInfo::on_initialize_open_unsigned() } - } + }, _ => T::WeightInfo::on_initialize_nothing(), } } diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 278f46e13818e..c528faf669ee3 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -130,12 +130,12 @@ impl PalletStructDef { if generate_storage_info.is_none() => { generate_storage_info = Some(span); - } + }, PalletStructAttr::StorageVersion { storage_version, .. 
} if storage_version_found.is_none() => { storage_version_found = Some(storage_version); - } + }, attr => { let msg = "Unexpected duplicated attribute"; return Err(syn::Error::new(attr.span(), msg)) diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index f9fce9d8744da..59d12c5a5d2ee 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -20,7 +20,7 @@ honggfuzz = "0.5" rand = { version = "0.7.3", features = ["std", "small_rng"] } sp-npos-elections = { version = "4.0.0-dev", path = ".." } sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } -structopt = "0.3.21" +structopt = "0.3.25" [[bin]] name = "reduce" diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 605a14e3adff7..03520f9cbd5d7 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -25,7 +25,7 @@ sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } codec = { version = "2.0.0", package = "parity-scale-codec" } -structopt = "0.3.8" +structopt = "0.3.25" chrono = "0.4" serde = "1.0.126" handlebars = "3.5.0" diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 9d14819337419..dfbf73ee9d5fa 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -structopt = "0.3.8" +structopt = "0.3.25" frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index 4bc27021b4c57..2c9374fc5cca4 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -23,4 +23,4 @@ sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } chrono = { version = "0.4.19" } git2 = { version = "0.13.23", default-features = false } num-format = { version = "0.4.0" } -structopt = "0.3.21" +structopt = "0.3.25" diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index b0256722f466c..b5a7e0b898c92 100644 --- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -14,4 +14,4 @@ node-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runtime" } generate-bags = { version = "4.0.0-dev", path = "../" } # third-party -structopt = "0.3.21" +structopt = "0.3.25" diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index a89a625bbd9ed..44be678ba3814 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] log = "0.4.8" parity-scale-codec = { version = "2.3.1" } serde = "1.0.126" -structopt = "0.3.8" +structopt = "0.3.25" sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../../client/service" } sc-cli = { version = 
"0.10.0-dev", path = "../../../../client/cli" } From ab40c7ef81b1ecdce254c1b76bbaf559b5473576 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Nov 2021 19:56:41 +0100 Subject: [PATCH 093/162] Bump nix from 0.19.1 to 0.23.0 (#10254) Bumps [nix](https://github.com/nix-rust/nix) from 0.19.1 to 0.23.0. - [Release notes](https://github.com/nix-rust/nix/releases) - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md) - [Commits](https://github.com/nix-rust/nix/compare/v0.19.1...v0.23.0) --- updated-dependencies: - dependency-name: nix dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 5 +++-- bin/node/cli/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e41a6c867177f..7c32ecb3f36d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4469,14 +4469,15 @@ dependencies = [ [[package]] name = "nix" -version = "0.19.1" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" +checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188" dependencies = [ "bitflags", "cc", "cfg-if 1.0.0", "libc", + "memoffset", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 374015a3426c9..122f2d0c2c8fc 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -122,7 +122,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" futures = "0.3.16" tempfile = "3.1.0" assert_cmd = "2.0.2" -nix = "0.19" +nix = "0.23" serde_json = "1.0" regex = "1" platforms = "1.1" From ed60316879773b5d81cad4525fd9c1a511de2952 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 Nov 2021 21:27:46 +0100 Subject: [PATCH 094/162] Bump cargo_metadata from 0.13.1 to 0.14.1 (#10255) Bumps [cargo_metadata](https://github.com/oli-obk/cargo_metadata) from 0.13.1 to 0.14.1. - [Release notes](https://github.com/oli-obk/cargo_metadata/releases) - [Commits](https://github.com/oli-obk/cargo_metadata/compare/v0.13.1...0.14.1) --- updated-dependencies: - dependency-name: cargo_metadata dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 11 ++++++----- utils/wasm-builder/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7c32ecb3f36d1..57bf1278822c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -812,14 +812,13 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.13.1" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" +checksum = "ba2ae6de944143141f6155a473a6b02f66c7c3f9f47316f802f80204ebfe6e12" dependencies = [ "camino", "cargo-platform", - "semver 0.11.0", - "semver-parser 0.10.2", + "semver 1.0.4", "serde", "serde_json", ] @@ -8844,7 +8843,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ "semver-parser 0.10.2", - "serde", ] [[package]] @@ -8852,6 +8850,9 @@ name = "semver" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +dependencies = [ + "serde", +] [[package]] name = "semver-parser" diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 88318f4f0d54c..a2a56a5bf22cf 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] build-helper = "0.1.1" -cargo_metadata = "0.13.1" +cargo_metadata = "0.14.1" tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.2" From ee8ac3a6c9c9292911afbf5ec16fbb2bb0d44b27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 13 Nov 2021 23:57:19 +0100 Subject: [PATCH 095/162] Fix cargo unleash check (#10261) --- utils/frame/remote-externalities/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 56f797343c0f4..8f54dd01df0a3 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.4.1", features = ["ws-client", "macros"] } env_logger = "0.9" -frame-support = { path = "../../../frame/support", optional = true } +frame-support = { path = "../../../frame/support", optional = true, version = "4.0.0-dev" } log = "0.4.11" codec = { package = "parity-scale-codec", version = "2.0.0" } serde_json = "1.0" From 1b646b219e0e0c7489a2a8488381fc2e32da6a1c Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sun, 14 Nov 2021 15:04:20 +0000 Subject: [PATCH 096/162] rework `staking::reap_stash` (#10178) * rework reap_stash * Update frame/staking/src/pallet/mod.rs Co-authored-by: Zeke Mostov * Update frame/staking/src/pallet/mod.rs Co-authored-by: Zeke Mostov * Update frame/staking/src/pallet/mod.rs Co-authored-by: Zeke Mostov * Fix Co-authored-by: Shawn Tabrizi Co-authored-by: Zeke Mostov --- frame/staking/src/benchmarking.rs | 5 +- frame/staking/src/pallet/mod.rs | 40 +++++----- frame/staking/src/tests.rs | 118 +++++++----------------------- 3 files changed, 50 insertions(+), 113 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 80630818de7e6..e312aedbec1f3 100644 --- 
a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -28,7 +28,7 @@ use frame_support::{ traits::{Currency, CurrencyToVote, Get, Imbalance}, }; use sp_runtime::{ - traits::{StaticLookup, Zero}, + traits::{Bounded, One, StaticLookup, Zero}, Perbill, Percent, }; use sp_staking::SessionIndex; @@ -38,7 +38,6 @@ pub use frame_benchmarking::{ account, benchmarks, impl_benchmark_test_suite, whitelist_account, whitelisted_caller, }; use frame_system::RawOrigin; -use sp_runtime::traits::{Bounded, One}; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; @@ -695,7 +694,7 @@ benchmarks! { let stash = scenario.origin_stash1.clone(); add_slashing_spans::(&stash, s); - T::Currency::make_free_balance_be(&stash, T::Currency::minimum_balance()); + Ledger::::insert(&controller, StakingLedger { active: T::Currency::minimum_balance() - One::one(), total: T::Currency::minimum_balance() - One::one(), ..Default::default() }); assert!(Bonded::::contains_key(&stash)); assert!(T::SortedListProvider::contains(&stash)); diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 8e97a90e07544..197c2eed325a1 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -1425,33 +1425,37 @@ pub mod pallet { Ok(()) } - /// Remove all data structure concerning a staker/stash once its balance is at the minimum. - /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone - /// and the target `stash` must have no funds left beyond the ED. + /// Remove all data structures concerning a staker/stash once it is at a state where it can + /// be considered `dust` in the staking system. The requirements are: /// - /// This can be called from any origin. + /// 1. the `total_balance` of the stash is below existential deposit. + /// 2. or, the `ledger.total` of the stash is below existential deposit. /// - /// - `stash`: The stash account to reap. Its balance must be zero. + /// The former can happen in cases like a slash; the latter when a fully unbonded account + /// is still receiving staking rewards in `RewardDestination::Staked`. /// - /// # - /// Complexity: O(S) where S is the number of slashing spans on the account. - /// DB Weight: - /// - Reads: Stash Account, Bonded, Slashing Spans, Locks - /// - Writes: Bonded, Slashing Spans (if S > 0), Ledger, Payee, Validators, Nominators, - /// Stash Account, Locks - /// - Writes Each: SpanSlash * S - /// # + /// It can be called by anyone, as long as `stash` meets the above requirements. + /// + /// Refunds the transaction fees upon successful execution. #[pallet::weight(T::WeightInfo::reap_stash(*num_slashing_spans))] pub fn reap_stash( - _origin: OriginFor, + origin: OriginFor, stash: T::AccountId, num_slashing_spans: u32, - ) -> DispatchResult { - let at_minimum = T::Currency::total_balance(&stash) == T::Currency::minimum_balance(); - ensure!(at_minimum, Error::::FundedTarget); + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + let ed = T::Currency::minimum_balance(); + let reapable = T::Currency::total_balance(&stash) < ed || + Self::ledger(Self::bonded(stash.clone()).ok_or(Error::::NotStash)?) + .map(|l| l.total) + .unwrap_or_default() < ed; + ensure!(reapable, Error::::FundedTarget); + Self::kill_stash(&stash, num_slashing_spans)?; T::Currency::remove_lock(STAKING_ID, &stash); - Ok(()) + + Ok(Pays::No.into()) } /// Remove the given nominations from the calling validator. 
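The `reapable` check introduced above condenses to a two-clause dust predicate. A standalone sketch, with plain `u128` values standing in for the pallet's `BalanceOf` lookups (function and argument names are illustrative):

/// A stash is reapable once either its total balance or its bonded ledger
/// total falls below the existential deposit.
fn is_reapable(total_balance: u128, ledger_total: u128, existential_deposit: u128) -> bool {
    // Clause 1: e.g. a slash pushed the account's balance under the ED.
    // Clause 2: e.g. a fully unbonded stash kept receiving rewards via
    // `RewardDestination::Staked`, leaving a sub-ED ledger total.
    total_balance < existential_deposit || ledger_total < existential_deposit
}

Note the strict `<`: an account sitting exactly at the existential deposit is not reapable, which is why the reworked benchmark above seeds the ledger with `minimum_balance() - One::one()`.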
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index d6d92d5bd57fc..8e8a7ee636d8d 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1633,115 +1633,49 @@ fn reward_to_stake_works() { } #[test] -fn on_free_balance_zero_stash_removes_validator() { - // Tests that validator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty +fn reap_stash_works() { ExtBuilder::default() .existential_deposit(10) .balance_factor(10) .build_and_execute(|| { - // Check the balance of the validator account + // given assert_eq!(Balances::free_balance(10), 10); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 10 * 1000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - - // Check the balance of the stash account has not been touched assert_eq!(Balances::free_balance(11), 10 * 1000); - // Check these two accounts are still bonded assert_eq!(Staking::bonded(&11), Some(10)); - // Check storage items have not changed assert!(>::contains_key(&10)); assert!(>::contains_key(&11)); assert!(>::contains_key(&11)); assert!(>::contains_key(&11)); - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); -} - -#[test] -fn on_free_balance_zero_stash_removes_nominator() { - // Tests that nominator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty - ExtBuilder::default() - .existential_deposit(10) - .balance_factor(10) - .build_and_execute(|| { - // Make 10 a nominator - assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(>::contains_key(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(10), 10); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 10_000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 10_000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - 
assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); + // stash is not reapable + assert_noop!( + Staking::reap_stash(Origin::signed(20), 11, 0), + Error::::FundedTarget + ); + // controller or any other account is not reapable + assert_noop!(Staking::reap_stash(Origin::signed(20), 10, 0), Error::::NotStash); - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 10); + // no easy way to cause an account to go below ED, we tweak their staking ledger + // instead. + Ledger::::insert( + 10, + StakingLedger { + stash: 11, + total: 5, + active: 5, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::none(), 11, 0)); + // reap-able + assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 0)); - // Check storage items do not exist + // then assert!(!>::contains_key(&10)); assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); }); } @@ -2556,10 +2490,10 @@ fn garbage_collection_after_slashing() { // reap_stash respects num_slashing_spans so that weight is accurate assert_noop!( - Staking::reap_stash(Origin::none(), 11, 0), + Staking::reap_stash(Origin::signed(20), 11, 0), Error::::IncorrectSlashingSpans ); - assert_ok!(Staking::reap_stash(Origin::none(), 11, 2)); + assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 2)); assert!(::SlashingSpans::get(&11).is_none()); assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); From 7db0768a85dc36a3f2a44d042b32f3715c00a90d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 15 Nov 2021 12:45:37 +0100 Subject: [PATCH 097/162] Keystore: Store files with permission 600 on unix (#10263) --- client/keystore/src/local.rs | 44 +++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index e5c8ff14af095..965e68336e3bc 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -364,8 +364,7 @@ impl KeystoreInner { let path = path.into(); fs::create_dir_all(&path)?; - let instance = Self { path: Some(path), additional: HashMap::new(), password }; - Ok(instance) + Ok(Self { path: Some(path), additional: HashMap::new(), password }) } /// Get the password for this store. @@ -397,10 +396,9 @@ impl KeystoreInner { /// Places it into the file system store, if a path is configured. fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { if let Some(path) = self.key_file_path(public, key_type) { - let mut file = File::create(path).map_err(Error::Io)?; - serde_json::to_writer(&file, &suri).map_err(Error::Json)?; - file.flush().map_err(Error::Io)?; + Self::write_to_file(path, suri)?; } + Ok(()) } @@ -411,15 +409,29 @@ impl KeystoreInner { fn generate_by_type(&mut self, key_type: KeyTypeId) -> Result { let (pair, phrase, _) = Pair::generate_with_phrase(self.password()); if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { - let mut file = File::create(path)?; - serde_json::to_writer(&file, &phrase)?; - file.flush()?; + Self::write_to_file(path, &phrase)?; } else { self.insert_ephemeral_pair(&pair, &phrase, key_type); } + Ok(pair) } + /// Write the given `data` to `file`. 
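The `write_to_file` helper defined next centralizes key-file writes and, on Unix, tightens the file mode to `0o600` so that only the owner can read the stored secret. A minimal self-contained illustration of the same standard-library pattern (illustrative function name and path handling, no JSON serialization):

use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

// Write secret material, then restrict it to owner read/write on Unix.
fn write_secret(path: &Path, data: &str) -> std::io::Result<()> {
    let mut file = File::create(path)?;
    file.write_all(data.as_bytes())?;
    file.flush()?;
    #[cfg(target_family = "unix")]
    {
        use std::os::unix::fs::PermissionsExt;
        // 0o600 = read/write for the owner, no access for group or others.
        file.set_permissions(fs::Permissions::from_mode(0o600))?;
    }
    Ok(())
}

The unit test added at the end of this patch asserts the resulting mode as `0o100600`, i.e. the permission bits above plus the regular-file type bits.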
+ fn write_to_file(file: PathBuf, data: &str) -> Result<()> { + let mut file = File::create(file)?; + serde_json::to_writer(&file, data)?; + file.flush()?; + + #[cfg(target_family = "unix")] + { + use std::os::unix::fs::PermissionsExt; + file.set_permissions(fs::Permissions::from_mode(0o600))?; + } + + Ok(()) + } + /// Create a new key from seed. /// /// Does not place it into the file system store. @@ -735,4 +747,20 @@ mod tests { SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 2); } + + #[test] + #[cfg(target_family = "unix")] + fn uses_correct_file_permissions_on_unix() { + use std::os::unix::fs::PermissionsExt; + + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + + let public = SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, None).unwrap(); + + let path = store.0.read().key_file_path(public.as_ref(), TEST_KEY_TYPE).unwrap(); + let permissions = File::open(path).unwrap().metadata().unwrap().permissions(); + + assert_eq!(0o100600, permissions.mode()); + } } From 7ebab91ed5179c3be7176c04d6a4f3996cb50563 Mon Sep 17 00:00:00 2001 From: Ayevbeosa Iyamu Date: Mon, 15 Nov 2021 20:16:03 +0100 Subject: [PATCH 098/162] Add field names to pallet `Event` variants (#10184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Changed named fields to struct variants * Fixed errors. * Made adjustments as per `cargo +nightly fmt`. * Update frame/uniques/src/lib.rs Co-authored-by: Alexander Theißen * Removed redundant comments for structs. * Moved frame-support to dev dependencies Co-authored-by: Alexander Theißen --- frame/contracts/src/exec.rs | 8 +- frame/proxy/src/tests.rs | 6 +- frame/uniques/src/benchmarking.rs | 52 ++++---- frame/uniques/src/functions.rs | 13 +- frame/uniques/src/impl_nonfungibles.rs | 2 +- frame/uniques/src/lib.rs | 174 +++++++++++++++---------- frame/utility/src/lib.rs | 9 +- frame/utility/src/tests.rs | 20 ++- frame/vesting/src/lib.rs | 27 ++-- 9 files changed, 185 insertions(+), 126 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 7ef1aec2dfc60..90a640418bcc0 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -2157,10 +2157,10 @@ mod tests { }, EventRecord { phase: Phase::Initialization, - event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted( - 1, - frame_system::Error::::CallFiltered.into() - ),), + event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted { + index: 1, + error: frame_system::Error::::CallFiltered.into() + },), topics: vec![], }, ] diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 93a0e4ce7d622..ed21a80f62139 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -369,7 +369,8 @@ fn filtering_works() { ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ - UtilityEvent::BatchInterrupted(0, SystemError::CallFiltered.into()).into(), + UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } + .into(), ProxyEvent::ProxyExecuted(Ok(())).into(), ]); @@ -387,7 +388,8 @@ fn filtering_works() { ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ - UtilityEvent::BatchInterrupted(0, SystemError::CallFiltered.into()).into(), + UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } + .into(), 
ProxyEvent::ProxyExecuted(Ok(())).into(), ]); diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 0e161bf7bfe85..513509bda70ea 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -141,7 +141,7 @@ benchmarks_instance_pallet! { T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) verify { - assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + assert_last_event::(Event::Created { class: Default::default(), creator: caller.clone(), owner: caller }.into()); } force_create { @@ -149,7 +149,7 @@ benchmarks_instance_pallet! { let caller_lookup = T::Lookup::unlookup(caller.clone()); }: _(SystemOrigin::Root, Default::default(), caller_lookup, true) verify { - assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + assert_last_event::(Event::ForceCreated { class: Default::default(), owner: caller }.into()); } destroy { @@ -171,7 +171,7 @@ benchmarks_instance_pallet! { let witness = Class::::get(class).unwrap().destroy_witness(); }: _(SystemOrigin::Signed(caller), class, witness) verify { - assert_last_event::(Event::Destroyed(class).into()); + assert_last_event::(Event::Destroyed { class: class }.into()); } mint { @@ -179,7 +179,7 @@ benchmarks_instance_pallet! { let instance = Default::default(); }: _(SystemOrigin::Signed(caller.clone()), class, instance, caller_lookup) verify { - assert_last_event::(Event::Issued(class, instance, caller).into()); + assert_last_event::(Event::Issued { class, instance, owner: caller }.into()); } burn { @@ -187,7 +187,7 @@ benchmarks_instance_pallet! { let (instance, ..) = mint_instance::(0); }: _(SystemOrigin::Signed(caller.clone()), class, instance, Some(caller_lookup)) verify { - assert_last_event::(Event::Burned(class, instance, caller).into()); + assert_last_event::(Event::Burned { class, instance, owner: caller }.into()); } transfer { @@ -198,7 +198,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), class, instance, target_lookup) verify { - assert_last_event::(Event::Transferred(class, instance, caller, target).into()); + assert_last_event::(Event::Transferred { class, instance, from: caller, to: target }.into()); } redeposit { @@ -217,7 +217,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(caller.clone()), class, instances.clone()) verify { - assert_last_event::(Event::Redeposited(class, instances).into()); + assert_last_event::(Event::Redeposited { class, successful_instances: instances }.into()); } freeze { @@ -225,7 +225,7 @@ benchmarks_instance_pallet! { let (instance, ..) = mint_instance::(Default::default()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), Default::default()) verify { - assert_last_event::(Event::Frozen(Default::default(), Default::default()).into()); + assert_last_event::(Event::Frozen { class: Default::default(), instance: Default::default() }.into()); } thaw { @@ -238,14 +238,14 @@ benchmarks_instance_pallet! 
{ )?; }: _(SystemOrigin::Signed(caller.clone()), class, instance) verify { - assert_last_event::(Event::Thawed(class, instance).into()); + assert_last_event::(Event::Thawed { class, instance }.into()); } freeze_class { let (class, caller, caller_lookup) = create_class::(); }: _(SystemOrigin::Signed(caller.clone()), class) verify { - assert_last_event::(Event::ClassFrozen(class).into()); + assert_last_event::(Event::ClassFrozen { class }.into()); } thaw_class { @@ -254,7 +254,7 @@ benchmarks_instance_pallet! { Uniques::::freeze_class(origin, class)?; }: _(SystemOrigin::Signed(caller.clone()), class) verify { - assert_last_event::(Event::ClassThawed(class).into()); + assert_last_event::(Event::ClassThawed { class }.into()); } transfer_ownership { @@ -264,7 +264,7 @@ benchmarks_instance_pallet! { T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); }: _(SystemOrigin::Signed(caller), class, target_lookup) verify { - assert_last_event::(Event::OwnerChanged(class, target).into()); + assert_last_event::(Event::OwnerChanged { class, new_owner: target }.into()); } set_team { @@ -274,12 +274,12 @@ benchmarks_instance_pallet! { let target2 = T::Lookup::unlookup(account("target", 2, SEED)); }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) verify { - assert_last_event::(Event::TeamChanged( + assert_last_event::(Event::TeamChanged{ class, - account("target", 0, SEED), - account("target", 1, SEED), - account("target", 2, SEED), - ).into()); + issuer: account("target", 0, SEED), + admin: account("target", 1, SEED), + freezer: account("target", 2, SEED), + }.into()); } force_asset_status { @@ -296,7 +296,7 @@ benchmarks_instance_pallet! { }; }: { call.dispatch_bypass_filter(origin)? } verify { - assert_last_event::(Event::AssetStatusChanged(class).into()); + assert_last_event::(Event::AssetStatusChanged { class }.into()); } set_attribute { @@ -308,7 +308,7 @@ benchmarks_instance_pallet! { add_instance_metadata::(instance); }: _(SystemOrigin::Signed(caller), class, Some(instance), key.clone(), value.clone()) verify { - assert_last_event::(Event::AttributeSet(class, Some(instance), key, value).into()); + assert_last_event::(Event::AttributeSet { class, maybe_instance: Some(instance), key, value }.into()); } clear_attribute { @@ -318,7 +318,7 @@ benchmarks_instance_pallet! { let (key, ..) = add_instance_attribute::(instance); }: _(SystemOrigin::Signed(caller), class, Some(instance), key.clone()) verify { - assert_last_event::(Event::AttributeCleared(class, Some(instance), key).into()); + assert_last_event::(Event::AttributeCleared { class, maybe_instance: Some(instance), key }.into()); } set_metadata { @@ -328,7 +328,7 @@ benchmarks_instance_pallet! { let (instance, ..) = mint_instance::(0); }: _(SystemOrigin::Signed(caller), class, instance, data.clone(), false) verify { - assert_last_event::(Event::MetadataSet(class, instance, data, false).into()); + assert_last_event::(Event::MetadataSet { class, instance, data, is_frozen: false }.into()); } clear_metadata { @@ -337,7 +337,7 @@ benchmarks_instance_pallet! { add_instance_metadata::(instance); }: _(SystemOrigin::Signed(caller), class, instance) verify { - assert_last_event::(Event::MetadataCleared(class, instance).into()); + assert_last_event::(Event::MetadataCleared { class, instance }.into()); } set_class_metadata { @@ -346,7 +346,7 @@ benchmarks_instance_pallet! 
{ let (class, caller, _) = create_class::(); }: _(SystemOrigin::Signed(caller), class, data.clone(), false) verify { - assert_last_event::(Event::ClassMetadataSet(class, data, false).into()); + assert_last_event::(Event::ClassMetadataSet { class, data, is_frozen: false }.into()); } clear_class_metadata { @@ -354,7 +354,7 @@ benchmarks_instance_pallet! { add_class_metadata::(); }: _(SystemOrigin::Signed(caller), class) verify { - assert_last_event::(Event::ClassMetadataCleared(class).into()); + assert_last_event::(Event::ClassMetadataCleared { class }.into()); } approve_transfer { @@ -364,7 +364,7 @@ benchmarks_instance_pallet! { let delegate_lookup = T::Lookup::unlookup(delegate.clone()); }: _(SystemOrigin::Signed(caller.clone()), class, instance, delegate_lookup) verify { - assert_last_event::(Event::ApprovedTransfer(class, instance, caller, delegate).into()); + assert_last_event::(Event::ApprovedTransfer { class, instance, owner: caller, delegate }.into()); } cancel_approval { @@ -376,7 +376,7 @@ benchmarks_instance_pallet! { Uniques::::approve_transfer(origin, class, instance, delegate_lookup.clone())?; }: _(SystemOrigin::Signed(caller.clone()), class, instance, Some(delegate_lookup)) verify { - assert_last_event::(Event::ApprovalCancelled(class, instance, caller, delegate).into()); + assert_last_event::(Event::ApprovalCancelled { class, instance, owner: caller, delegate }.into()); } impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 68acf7f1879fb..43d634ad569e7 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -44,7 +44,12 @@ impl, I: 'static> Pallet { details.owner = dest; Asset::::insert(&class, &instance, &details); - Self::deposit_event(Event::Transferred(class, instance, origin, details.owner)); + Self::deposit_event(Event::Transferred { + class, + instance, + from: origin, + to: details.owner, + }); Ok(()) } @@ -105,7 +110,7 @@ impl, I: 'static> Pallet { Attribute::::remove_prefix((&class,), None); T::Currency::unreserve(&class_details.owner, class_details.total_deposit); - Self::deposit_event(Event::Destroyed(class)); + Self::deposit_event(Event::Destroyed { class }); Ok(DestroyWitness { instances: class_details.instances, @@ -146,7 +151,7 @@ impl, I: 'static> Pallet { Ok(()) })?; - Self::deposit_event(Event::Issued(class, instance, owner)); + Self::deposit_event(Event::Issued { class, instance, owner }); Ok(()) } @@ -174,7 +179,7 @@ impl, I: 'static> Pallet { Asset::::remove(&class, &instance); Account::::remove((&owner, &class, &instance)); - Self::deposit_event(Event::Burned(class, instance, owner)); + Self::deposit_event(Event::Burned { class, instance, owner }); Ok(()) } } diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index 5394f02160e3c..72aa1dd0d4cb1 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -98,7 +98,7 @@ impl, I: 'static> Create<::AccountId> for Pallet admin.clone(), T::ClassDeposit::get(), false, - Event::Created(class.clone(), who.clone(), admin.clone()), + Event::Created { class: class.clone(), creator: who.clone(), owner: admin.clone() }, ) } } diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 1bf220e4a7876..7e380459252e7 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -191,63 +191,90 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn 
deposit_event)] pub enum Event, I: 'static = ()> { - /// An asset class was created. \[ class, creator, owner \] - Created(T::ClassId, T::AccountId, T::AccountId), - /// An asset class was force-created. \[ class, owner \] - ForceCreated(T::ClassId, T::AccountId), - /// An asset `class` was destroyed. \[ class \] - Destroyed(T::ClassId), - /// An asset `instance` was issued. \[ class, instance, owner \] - Issued(T::ClassId, T::InstanceId, T::AccountId), - /// An asset `instance` was transferred. \[ class, instance, from, to \] - Transferred(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), - /// An asset `instance` was destroyed. \[ class, instance, owner \] - Burned(T::ClassId, T::InstanceId, T::AccountId), - /// Some asset `instance` was frozen. \[ class, instance \] - Frozen(T::ClassId, T::InstanceId), - /// Some asset `instance` was thawed. \[ class, instance \] - Thawed(T::ClassId, T::InstanceId), - /// Some asset `class` was frozen. \[ class \] - ClassFrozen(T::ClassId), - /// Some asset `class` was thawed. \[ class \] - ClassThawed(T::ClassId), - /// The owner changed \[ class, new_owner \] - OwnerChanged(T::ClassId, T::AccountId), - /// The management team changed \[ class, issuer, admin, freezer \] - TeamChanged(T::ClassId, T::AccountId, T::AccountId, T::AccountId), + /// An asset class was created. + Created { class: T::ClassId, creator: T::AccountId, owner: T::AccountId }, + /// An asset class was force-created. + ForceCreated { class: T::ClassId, owner: T::AccountId }, + /// An asset `class` was destroyed. + Destroyed { class: T::ClassId }, + /// An asset `instance` was issued. + Issued { class: T::ClassId, instance: T::InstanceId, owner: T::AccountId }, + /// An asset `instance` was transferred. + Transferred { + class: T::ClassId, + instance: T::InstanceId, + from: T::AccountId, + to: T::AccountId, + }, + /// An asset `instance` was destroyed. + Burned { class: T::ClassId, instance: T::InstanceId, owner: T::AccountId }, + /// Some asset `instance` was frozen. + Frozen { class: T::ClassId, instance: T::InstanceId }, + /// Some asset `instance` was thawed. + Thawed { class: T::ClassId, instance: T::InstanceId }, + /// Some asset `class` was frozen. + ClassFrozen { class: T::ClassId }, + /// Some asset `class` was thawed. + ClassThawed { class: T::ClassId }, + /// The owner changed. + OwnerChanged { class: T::ClassId, new_owner: T::AccountId }, + /// The management team changed. + TeamChanged { + class: T::ClassId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + }, /// An `instance` of an asset `class` has been approved by the `owner` for transfer by a /// `delegate`. - /// \[ class, instance, owner, delegate \] - ApprovedTransfer(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), + ApprovedTransfer { + class: T::ClassId, + instance: T::InstanceId, + owner: T::AccountId, + delegate: T::AccountId, + }, /// An approval for a `delegate` account to transfer the `instance` of an asset `class` was /// cancelled by its `owner`. - /// \[ class, instance, owner, delegate \] - ApprovalCancelled(T::ClassId, T::InstanceId, T::AccountId, T::AccountId), + ApprovalCancelled { + class: T::ClassId, + instance: T::InstanceId, + owner: T::AccountId, + delegate: T::AccountId, + }, /// An asset `class` has had its attributes changed by the `Force` origin. - /// \[ class \] - AssetStatusChanged(T::ClassId), - /// New metadata has been set for an asset class. 
\[ class, data, is_frozen \] - ClassMetadataSet(T::ClassId, BoundedVec, bool), - /// Metadata has been cleared for an asset class. \[ class \] - ClassMetadataCleared(T::ClassId), + AssetStatusChanged { class: T::ClassId }, + /// New metadata has been set for an asset class. + ClassMetadataSet { + class: T::ClassId, + data: BoundedVec, + is_frozen: bool, + }, + /// Metadata has been cleared for an asset class. + ClassMetadataCleared { class: T::ClassId }, /// New metadata has been set for an asset instance. - /// \[ class, instance, data, is_frozen \] - MetadataSet(T::ClassId, T::InstanceId, BoundedVec, bool), - /// Metadata has been cleared for an asset instance. \[ class, instance \] - MetadataCleared(T::ClassId, T::InstanceId), - /// Metadata has been cleared for an asset instance. \[ class, successful_instances \] - Redeposited(T::ClassId, Vec), + MetadataSet { + class: T::ClassId, + instance: T::InstanceId, + data: BoundedVec, + is_frozen: bool, + }, + /// Metadata has been cleared for an asset instance. + MetadataCleared { class: T::ClassId, instance: T::InstanceId }, + /// Metadata has been cleared for an asset instance. + Redeposited { class: T::ClassId, successful_instances: Vec }, /// New attribute metadata has been set for an asset class or instance. - /// \[ class, maybe_instance, key, value \] - AttributeSet( - T::ClassId, - Option, - BoundedVec, - BoundedVec, - ), + AttributeSet { + class: T::ClassId, + maybe_instance: Option, + key: BoundedVec, + value: BoundedVec, + }, /// Attribute metadata has been cleared for an asset class or instance. - /// \[ class, maybe_instance, key, maybe_value \] - AttributeCleared(T::ClassId, Option, BoundedVec), + AttributeCleared { + class: T::ClassId, + maybe_instance: Option, + key: BoundedVec, + }, } #[pallet::error] @@ -317,7 +344,7 @@ pub mod pallet { admin.clone(), T::ClassDeposit::get(), false, - Event::Created(class, owner, admin), + Event::Created { class, creator: owner, owner: admin }, ) } @@ -353,7 +380,7 @@ pub mod pallet { owner.clone(), Zero::zero(), free_holding, - Event::ForceCreated(class, owner), + Event::ForceCreated { class, owner }, ) } @@ -549,7 +576,10 @@ pub mod pallet { } Class::::insert(&class, &class_details); - Self::deposit_event(Event::::Redeposited(class, successful)); + Self::deposit_event(Event::::Redeposited { + class, + successful_instances: successful, + }); Ok(()) } @@ -580,7 +610,7 @@ pub mod pallet { details.is_frozen = true; Asset::::insert(&class, &instance, &details); - Self::deposit_event(Event::::Frozen(class, instance)); + Self::deposit_event(Event::::Frozen { class, instance }); Ok(()) } @@ -610,7 +640,7 @@ pub mod pallet { details.is_frozen = false; Asset::::insert(&class, &instance, &details); - Self::deposit_event(Event::::Thawed(class, instance)); + Self::deposit_event(Event::::Thawed { class, instance }); Ok(()) } @@ -636,7 +666,7 @@ pub mod pallet { details.is_frozen = true; - Self::deposit_event(Event::::ClassFrozen(class)); + Self::deposit_event(Event::::ClassFrozen { class }); Ok(()) }) } @@ -663,7 +693,7 @@ pub mod pallet { details.is_frozen = false; - Self::deposit_event(Event::::ClassThawed(class)); + Self::deposit_event(Event::::ClassThawed { class }); Ok(()) }) } @@ -703,7 +733,7 @@ pub mod pallet { )?; details.owner = owner.clone(); - Self::deposit_event(Event::OwnerChanged(class, owner)); + Self::deposit_event(Event::OwnerChanged { class, new_owner: owner }); Ok(()) }) } @@ -741,7 +771,7 @@ pub mod pallet { details.admin = admin.clone(); details.freezer = freezer.clone(); - 
Self::deposit_event(Event::TeamChanged(class, issuer, admin, freezer)); + Self::deposit_event(Event::TeamChanged { class, issuer, admin, freezer }); Ok(()) }) } @@ -783,7 +813,12 @@ pub mod pallet { Asset::::insert(&class, &instance, &details); let delegate = details.approved.expect("set as Some above; qed"); - Self::deposit_event(Event::ApprovedTransfer(class, instance, details.owner, delegate)); + Self::deposit_event(Event::ApprovedTransfer { + class, + instance, + owner: details.owner, + delegate, + }); Ok(()) } @@ -829,7 +864,12 @@ pub mod pallet { } Asset::::insert(&class, &instance, &details); - Self::deposit_event(Event::ApprovalCancelled(class, instance, details.owner, old)); + Self::deposit_event(Event::ApprovalCancelled { + class, + instance, + owner: details.owner, + delegate: old, + }); Ok(()) } @@ -874,7 +914,7 @@ pub mod pallet { asset.is_frozen = is_frozen; *maybe_asset = Some(asset); - Self::deposit_event(Event::AssetStatusChanged(class)); + Self::deposit_event(Event::AssetStatusChanged { class }); Ok(()) }) } @@ -940,7 +980,7 @@ pub mod pallet { Attribute::::insert((&class, maybe_instance, &key), (&value, deposit)); Class::::insert(class, &class_details); - Self::deposit_event(Event::AttributeSet(class, maybe_instance, key, value)); + Self::deposit_event(Event::AttributeSet { class, maybe_instance, key, value }); Ok(()) } @@ -988,7 +1028,7 @@ pub mod pallet { class_details.total_deposit.saturating_reduce(deposit); T::Currency::unreserve(&class_details.owner, deposit); Class::::insert(class, &class_details); - Self::deposit_event(Event::AttributeCleared(class, maybe_instance, key)); + Self::deposit_event(Event::AttributeCleared { class, maybe_instance, key }); } Ok(()) } @@ -1053,7 +1093,7 @@ pub mod pallet { *metadata = Some(InstanceMetadata { deposit, data: data.clone(), is_frozen }); Class::::insert(&class, &class_details); - Self::deposit_event(Event::MetadataSet(class, instance, data, is_frozen)); + Self::deposit_event(Event::MetadataSet { class, instance, data, is_frozen }); Ok(()) }) } @@ -1098,7 +1138,7 @@ pub mod pallet { class_details.total_deposit.saturating_reduce(deposit); Class::::insert(&class, &class_details); - Self::deposit_event(Event::MetadataCleared(class, instance)); + Self::deposit_event(Event::MetadataCleared { class, instance }); Ok(()) }) } @@ -1158,7 +1198,7 @@ pub mod pallet { *metadata = Some(ClassMetadata { deposit, data: data.clone(), is_frozen }); - Self::deposit_event(Event::ClassMetadataSet(class, data, is_frozen)); + Self::deposit_event(Event::ClassMetadataSet { class, data, is_frozen }); Ok(()) }) } @@ -1195,7 +1235,7 @@ pub mod pallet { let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&details.owner, deposit); - Self::deposit_event(Event::ClassMetadataCleared(class)); + Self::deposit_event(Event::ClassMetadataCleared { class }); Ok(()) }) } diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 241526cef2230..14d8a66514e36 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -109,8 +109,8 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as - /// well as the error. \[index, error\] - BatchInterrupted(u32, DispatchError), + /// well as the error. + BatchInterrupted { index: u32, error: DispatchError }, /// Batch of dispatches completed fully with no error. 
BatchCompleted, /// A single item within a Batch of dispatches has completed with no error. @@ -217,7 +217,10 @@ pub mod pallet { // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { - Self::deposit_event(Event::BatchInterrupted(index as u32, e.error)); + Self::deposit_event(Event::BatchInterrupted { + index: index as u32, + error: e.error, + }); // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index f4d09a30ec078..32582fae82116 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -339,8 +339,11 @@ fn batch_with_signed_filters() { vec![Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })] ),); System::assert_last_event( - utility::Event::BatchInterrupted(0, frame_system::Error::::CallFiltered.into()) - .into(), + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), ); }); } @@ -411,7 +414,7 @@ fn batch_handles_weight_refund() { let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( - utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); // No weight is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -426,7 +429,7 @@ fn batch_handles_weight_refund() { let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( - utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -439,7 +442,7 @@ fn batch_handles_weight_refund() { let result = call.dispatch(Origin::signed(1)); assert_ok!(result); System::assert_last_event( - utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), ); assert_eq!( extract_actual_weight(&result, &info), @@ -587,8 +590,11 @@ fn batch_all_does_not_nest() { // and balances. assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); System::assert_has_event( - utility::Event::BatchInterrupted(0, frame_system::Error::::CallFiltered.into()) - .into(), + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), ); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 654723d009fab..6857918bc9a1c 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -104,9 +104,9 @@ enum VestingAction { /// Do not actively remove any schedules. Passive, /// Remove the schedule specified by the index. - Remove(usize), + Remove { index: usize }, /// Remove the two schedules, specified by index, so they can be merged. 
- Merge(usize, usize), + Merge { index1: usize, index2: usize }, } impl VestingAction { @@ -114,8 +114,8 @@ impl VestingAction { fn should_remove(&self, index: usize) -> bool { match self { Self::Passive => false, - Self::Remove(index1) => *index1 == index, - Self::Merge(index1, index2) => *index1 == index || *index2 == index, + Self::Remove { index: index1 } => *index1 == index, + Self::Merge { index1, index2 } => *index1 == index || *index2 == index, } } @@ -279,10 +279,9 @@ pub mod pallet { pub enum Event { /// The amount vested has been updated. This could indicate a change in funds available. /// The balance given is the amount which is left unvested (and thus locked). - /// \[account, unvested\] - VestingUpdated(T::AccountId, BalanceOf), + VestingUpdated { account: T::AccountId, unvested: BalanceOf }, /// An \[account\] has become fully vested. - VestingCompleted(T::AccountId), + VestingCompleted { account: T::AccountId }, } /// Error for the vesting pallet. @@ -450,7 +449,8 @@ pub mod pallet { let schedule2_index = schedule2_index as usize; let schedules = Self::vesting(&who).ok_or(Error::::NotVesting)?; - let merge_action = VestingAction::Merge(schedule1_index, schedule2_index); + let merge_action = + VestingAction::Merge { index1: schedule1_index, index2: schedule2_index }; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), merge_action)?; @@ -590,11 +590,14 @@ impl Pallet { fn write_lock(who: &T::AccountId, total_locked_now: BalanceOf) { if total_locked_now.is_zero() { T::Currency::remove_lock(VESTING_ID, who); - Self::deposit_event(Event::::VestingCompleted(who.clone())); + Self::deposit_event(Event::::VestingCompleted { account: who.clone() }); } else { let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons); - Self::deposit_event(Event::::VestingUpdated(who.clone(), total_locked_now)); + Self::deposit_event(Event::::VestingUpdated { + account: who.clone(), + unvested: total_locked_now, + }); }; } @@ -637,7 +640,7 @@ impl Pallet { action: VestingAction, ) -> Result<(Vec, T::BlockNumber>>, BalanceOf), DispatchError> { let (schedules, locked_now) = match action { - VestingAction::Merge(idx1, idx2) => { + VestingAction::Merge { index1: idx1, index2: idx2 } => { // The schedule index is based off of the schedule ordering prior to filtering out // any schedules that may be ending at this block. let schedule1 = *schedules.get(idx1).ok_or(Error::::ScheduleIndexOutOfBounds)?; @@ -762,7 +765,7 @@ where /// Remove a vesting schedule for a given account. fn remove_vesting_schedule(who: &T::AccountId, schedule_index: u32) -> DispatchResult { let schedules = Self::vesting(who).ok_or(Error::::NotVesting)?; - let remove_action = VestingAction::Remove(schedule_index as usize); + let remove_action = VestingAction::Remove { index: schedule_index as usize }; let (schedules, locked_now) = Self::exec_action(schedules.to_vec(), remove_action)?; From 7aa029ae4101ab61aa1d5d8a043cdf53b853225d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Nov 2021 19:39:11 +0000 Subject: [PATCH 099/162] Bump tokio from 1.12.0 to 1.13.0 (#10265) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.12.0 to 1.13.0. 
- [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.12.0...tokio-1.13.0) --- updated-dependencies: - dependency-name: tokio dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- bin/node/cli/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- test-utils/test-crate/Cargo.toml | 2 +- test-utils/test-runner/Cargo.toml | 2 +- utils/frame/remote-externalities/Cargo.toml | 2 +- utils/frame/rpc/support/Cargo.toml | 2 +- utils/prometheus/Cargo.toml | 4 ++-- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57bf1278822c0..99b2b53a74a4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10545,9 +10545,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc" +checksum = "588b2d10a336da58d877567cd8fb8a14b463e2104910f8132cd054b4b96e29ee" dependencies = [ "autocfg 1.0.1", "bytes 1.0.1", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 122f2d0c2c8fc..7529138c7f9d7 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -129,7 +129,7 @@ platforms = "1.1" async-std = { version = "1.10.0", features = ["attributes"] } soketto = "0.4.2" criterion = { version = "0.3.5", features = [ "async_tokio" ] } -tokio = { version = "1.10", features = ["macros", "time"] } +tokio = { version = "1.13", features = ["macros", "time"] } jsonrpsee-ws-client = "0.4.1" wait-timeout = "0.2" remote-externalities = { path = "../../../utils/frame/remote-externalities" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index b027063b109a8..2855e63cdc6a0 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" regex = "1.5.4" -tokio = { version = "1.10", features = [ "signal", "rt-multi-thread" ] } +tokio = { version = "1.13", features = [ "signal", "rt-multi-thread" ] } futures = "0.3.9" fdlimit = "0.2.1" libp2p = "0.39.1" diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index a7679f53ea9e8..90edc15863cdb 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -45,7 +45,7 @@ sp-timestamp = { path = "../../../primitives/timestamp", version = "4.0.0-dev" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev" } [dev-dependencies] -tokio = { version = "1.10.0", features = ["rt-multi-thread", "macros"] } +tokio = { version = "1.13.0", features = ["rt-multi-thread", "macros"] } sc-basic-authorship = { path = "../../basic-authorship", version = "0.10.0-dev" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client", version = "2.0.0" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool", version = "2.0.0" } diff --git a/client/finality-grandpa/Cargo.toml 
b/client/finality-grandpa/Cargo.toml index 43511ea59f146..ec4bac715ad40 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -57,5 +57,5 @@ sc-network-test = { version = "0.8.0", path = "../network/test" } sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } -tokio = "1.10" +tokio = "1.13" tempfile = "3.1.0" diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index c93eec85888dc..2a6fdddd7ad36 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -43,7 +43,7 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -tokio = "1.10" +tokio = "1.13" lazy_static = "1.4.0" [features] diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index f0ae6172d6d9e..3be2380785064 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -19,7 +19,7 @@ pubsub = { package = "jsonrpc-pubsub", version = "18.0.0" } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"} serde_json = "1.0.68" -tokio = "1.10" +tokio = "1.13" http = { package = "jsonrpc-http-server", version = "18.0.0" } ipc = { package = "jsonrpc-ipc-server", version = "18.0.0" } ws = { package = "jsonrpc-ws-server", version = "18.0.0" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 38729fd4ba5ce..a66a4fc67680e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -78,7 +78,7 @@ parity-util-mem = { version = "0.10.2", default-features = false, features = [ "primitive-types", ] } async-trait = "0.1.50" -tokio = { version = "1.10", features = ["time", "rt-multi-thread"] } +tokio = { version = "1.13", features = ["time", "rt-multi-thread"] } tempfile = "3.1.0" directories = "4.0.1" diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 114473b66c975..fe953a53bdd04 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] hex = "0.4" hex-literal = "0.3.4" tempfile = "3.1.0" -tokio = { version = "1.10.0", features = ["time"] } +tokio = { version = "1.13.0", features = ["time"] } log = "0.4.8" fdlimit = "0.2.1" parking_lot = "0.11.1" diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index 071a82f3c769f..4621332ccc0c1 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -12,6 +12,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -tokio = { version = "1.10", features = ["macros"] } +tokio = { version = "1.13", features = ["macros"] } test-utils = { version = "4.0.0-dev", path = "..", package = "substrate-test-utils" } sc-service = { version = "0.10.0-dev", path = "../../client/service" } diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml index 9299076bb1f68..7066fc778cbdd 100644 --- a/test-utils/test-runner/Cargo.toml +++ b/test-utils/test-runner/Cargo.toml @@ -48,7 +48,7 @@ frame-system = { path = 
"../../frame/system" } log = "0.4.8" futures = "0.3.16" -tokio = { version = "1.10", features = ["signal"] } +tokio = { version = "1.13", features = ["signal"] } # Calling RPC jsonrpc-core = "18.0" num-traits = "0.2.14" diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 8f54dd01df0a3..105ab1739f5b6 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } [dev-dependencies] -tokio = { version = "1.10", features = ["macros", "rt-multi-thread"] } +tokio = { version = "1.13", features = ["macros", "rt-multi-thread"] } pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "5.0.0-dev" } frame-support = { path = "../../../frame/support", version = "4.0.0-dev" } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 0d21bdd6c0181..dbaa12ee5ddb9 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -26,4 +26,4 @@ sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } [dev-dependencies] frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } scale-info = "1.0" -tokio = "1.10" +tokio = "1.13" diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 390a1c733cbd7..e2104ec5d55aa 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -18,9 +18,9 @@ prometheus = { version = "0.13.0", default-features = false } futures-util = { version = "0.3.17", default-features = false, features = ["io"] } derive_more = "0.99" async-std = { version = "1.10.0", features = ["unstable"] } -tokio = "1.10" +tokio = "1.13" hyper = { version = "0.14.14", default-features = false, features = ["http1", "server", "tcp"] } [dev-dependencies] hyper = { version = "0.14.14", features = ["client"] } -tokio = { version = "1.10", features = ["rt-multi-thread"] } +tokio = { version = "1.13", features = ["rt-multi-thread"] } From bc4cb49f4b3993887e6e9a8f6a7f649928f92689 Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Mon, 15 Nov 2021 20:41:41 +0100 Subject: [PATCH 100/162] client/service: refactor group param of spawn into an Enum (#10248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refractored group param to enum * refractored group param to enum * changed group param to GroupName enum for other methods too such as spawn_inner updated docs * revert on task type * put back ticks in GroupName * Update client/service/src/task_manager/mod.rs Co-authored-by: Bastian Köcher * document group name change specific to actual in enum declaration * change documentation * Update client/service/src/task_manager/mod.rs Co-authored-by: Andronik Ordian * changed Actual to Specific * Update client/service/src/task_manager/mod.rs Co-authored-by: Andronik Ordian * Update client/service/src/task_manager/mod.rs Co-authored-by: Bastian Köcher * Update client/service/src/task_manager/mod.rs Co-authored-by: Bastian Köcher * Update client/service/src/task_manager/mod.rs Co-authored-by: Bastian Köcher Co-authored-by: Damilare Co-authored-by: Bastian Köcher Co-authored-by: Andronik Ordian --- client/service/src/task_manager/mod.rs | 50 ++++++++++++++++++++------ 1 file changed, 40 insertions(+), 10 deletions(-) diff --git 
a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 64c00226073c7..25d3ecd7d5c3c 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -41,6 +41,32 @@ mod tests; /// Default task group name. pub const DEFAULT_GROUP_NAME: &'static str = "default"; +/// The name of a group a task belongs to. +/// +/// This name is passed alongside the task name to the Prometheus metrics and can be used +/// to group tasks. +pub enum GroupName { + /// Sets the group name to `default`. + Default, + /// Use the specifically given name as the group name. + Specific(&'static str), +} + +impl From<Option<&'static str>> for GroupName { + fn from(name: Option<&'static str>) -> Self { + match name { + Some(name) => Self::Specific(name), + None => Self::Default, + } + } +} + +impl From<&'static str> for GroupName { + fn from(name: &'static str) -> Self { + Self::Specific(name) + } +} + /// A handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { @@ -51,10 +77,10 @@ pub struct SpawnTaskHandle { } impl SpawnTaskHandle { - /// Spawns the given task with the given name and an optional group name. + /// Spawns the given task with the given name and a group name. /// If group is not specified, `DEFAULT_GROUP_NAME` will be used. /// - /// Note that the `name`/`group` is a `&'static str`. The reason for this choice is that + /// Note that the `name` is a `&'static str`. The reason for this choice is that /// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and /// that therefore the set of possible task names must be bounded. /// @@ -63,7 +89,7 @@ impl SpawnTaskHandle { pub fn spawn( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) @@ -73,7 +99,7 @@ impl SpawnTaskHandle { pub fn spawn_blocking( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) @@ -83,7 +109,7 @@ impl SpawnTaskHandle { fn spawn_inner( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { @@ -94,8 +120,12 @@ impl SpawnTaskHandle { let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); - // If no group is specified use default. - let group = group.unwrap_or(DEFAULT_GROUP_NAME); + + let group = match group.into() { + GroupName::Specific(var) => var, + // If no group is specified use default. + GroupName::Default => DEFAULT_GROUP_NAME, + }; // Note that we increase the started counter here and not within the future. This way, // we could properly visualize on Prometheus situations where the spawning doesn't work.
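A minimal usage sketch, added for illustration and not part of the recorded patch (the task names, group names and the `sketch` wrapper are assumptions): with `group: impl Into<GroupName>`, call sites can pass a bare `&'static str`, keep passing an `Option<&'static str>` as before, or name the variant directly.

fn sketch(handle: SpawnTaskHandle) {
	// A bare `&'static str` converts via `From<&'static str>`:
	handle.spawn("chain-sync", "networking", async {});
	// An `Option<&'static str>` converts via `From<Option<&'static str>>`, so
	// existing callers passing `None` keep compiling and fall back to
	// `DEFAULT_GROUP_NAME`:
	handle.spawn("telemetry", None, async {});
	// The group can also be spelled out explicitly:
	handle.spawn_blocking("db-compaction", GroupName::Default, async {});
}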
@@ -198,7 +228,7 @@ impl SpawnEssentialTaskHandle { pub fn spawn( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into, task: impl Future + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) @@ -210,7 +240,7 @@ impl SpawnEssentialTaskHandle { pub fn spawn_blocking( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into, task: impl Future + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) @@ -219,7 +249,7 @@ impl SpawnEssentialTaskHandle { fn spawn_inner( &self, name: &'static str, - group: Option<&'static str>, + group: impl Into, task: impl Future + Send + 'static, task_type: TaskType, ) { From c087bbedbde16711450c186518314903a2949cb3 Mon Sep 17 00:00:00 2001 From: David Salami <31099392+Wizdave97@users.noreply.github.com> Date: Tue, 16 Nov 2021 02:56:00 +0100 Subject: [PATCH 101/162] Add field names to pallet Event variants (#9993) * convert pallet-assets events to struct types * updated events of a couple pallets * updated pallet event field names * update pallet event field names * updated events in test files * cargo fmt * minorfixes * fix assertion error * minor fix * formatting fix * fmt --- bin/node/executor/tests/basic.rs | 69 ++-- frame/assets/src/benchmarking.rs | 56 +-- frame/assets/src/functions.rs | 41 +- frame/assets/src/lib.rs | 125 +++--- frame/assets/src/tests.rs | 7 +- frame/atomic-swap/src/lib.rs | 21 +- frame/bags-list/src/lib.rs | 6 +- frame/balances/src/lib.rs | 114 +++--- frame/balances/src/tests.rs | 30 +- frame/balances/src/tests_local.rs | 10 +- frame/balances/src/tests_reentrancy.rs | 47 ++- frame/bounties/src/benchmarking.rs | 6 +- frame/bounties/src/lib.rs | 49 ++- frame/bounties/src/tests.rs | 16 +- frame/collective/src/benchmarking.rs | 16 +- frame/collective/src/lib.rs | 77 ++-- frame/collective/src/tests.rs | 373 ++++++++++++++---- frame/contracts/src/tests.rs | 38 +- frame/democracy/src/benchmarking.rs | 2 +- frame/democracy/src/lib.rs | 112 +++--- .../election-provider-multi-phase/src/lib.rs | 48 ++- .../src/signed.rs | 4 +- frame/elections-phragmen/src/lib.rs | 63 +-- frame/elections/src/lib.rs | 23 +- frame/examples/basic/src/lib.rs | 17 +- frame/examples/offchain-worker/src/lib.rs | 5 +- frame/gilt/src/lib.rs | 36 +- frame/grandpa/src/lib.rs | 10 +- frame/grandpa/src/tests.rs | 10 +- frame/identity/src/benchmarking.rs | 8 +- frame/identity/src/lib.rs | 69 ++-- frame/im-online/src/lib.rs | 12 +- frame/indices/src/lib.rs | 22 +- frame/lottery/src/lib.rs | 8 +- frame/membership/src/lib.rs | 2 +- frame/multisig/src/lib.rs | 63 +-- frame/multisig/src/tests.rs | 9 +- frame/nicks/src/lib.rs | 30 +- frame/node-authorization/src/lib.rs | 42 +- frame/offences/benchmarking/src/lib.rs | 14 +- frame/offences/src/lib.rs | 4 +- frame/offences/src/tests.rs | 10 +- frame/proxy/src/benchmarking.rs | 18 +- frame/proxy/src/lib.rs | 44 ++- frame/proxy/src/tests.rs | 62 +-- frame/staking/src/slashing.rs | 2 +- frame/staking/src/tests.rs | 4 +- frame/transaction-payment/src/lib.rs | 8 +- 48 files changed, 1181 insertions(+), 681 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index b7df7bf0bd41a..7dc6c22aa3544 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -387,24 +387,27 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Withdraw(alice().into(), fees)), + event: 
Event::Balances(pallet_balances::Event::Withdraw { + who: alice().into(), + amount: fees, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Transfer( - alice().into(), - bob().into(), - 69 * DOLLARS, - )), + event: Event::Balances(pallet_balances::Event::Transfer { + from: alice().into(), + to: bob().into(), + amount: 69 * DOLLARS, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Deposit( - pallet_treasury::Pallet::::account_id(), - fees * 8 / 10, - )), + event: Event::Balances(pallet_balances::Event::Deposit { + who: pallet_treasury::Pallet::::account_id(), + amount: fees * 8 / 10, + }), topics: vec![], }, EventRecord { @@ -454,24 +457,27 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Withdraw(bob().into(), fees)), + event: Event::Balances(pallet_balances::Event::Withdraw { + who: bob().into(), + amount: fees, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Transfer( - bob().into(), - alice().into(), - 5 * DOLLARS, - )), + event: Event::Balances(pallet_balances::Event::Transfer { + from: bob().into(), + to: alice().into(), + amount: 5 * DOLLARS, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Deposit( - pallet_treasury::Pallet::::account_id(), - fees * 8 / 10, - )), + event: Event::Balances(pallet_balances::Event::Deposit { + who: pallet_treasury::Pallet::::account_id(), + amount: fees * 8 / 10, + }), topics: vec![], }, EventRecord { @@ -489,24 +495,27 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Withdraw(alice().into(), fees)), + event: Event::Balances(pallet_balances::Event::Withdraw { + who: alice().into(), + amount: fees, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Transfer( - alice().into(), - bob().into(), - 15 * DOLLARS, - )), + event: Event::Balances(pallet_balances::Event::Transfer { + from: alice().into(), + to: bob().into(), + amount: 15 * DOLLARS, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Deposit( - pallet_treasury::Pallet::::account_id(), - fees * 8 / 10, - )), + event: Event::Balances(pallet_balances::Event::Deposit { + who: pallet_treasury::Pallet::::account_id(), + amount: fees * 8 / 10, + }), topics: vec![], }, EventRecord { diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index d9de9ed3dedd4..475864bac9430 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -155,7 +155,7 @@ benchmarks_instance_pallet! { T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1u32.into()) verify { - assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + assert_last_event::(Event::Created { asset_id: Default::default(), creator: caller.clone(), owner: caller }.into()); } force_create { @@ -163,7 +163,7 @@ benchmarks_instance_pallet! 
{ let caller_lookup = T::Lookup::unlookup(caller.clone()); }: _(SystemOrigin::Root, Default::default(), caller_lookup, true, 1u32.into()) verify { - assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + assert_last_event::(Event::ForceCreated { asset_id: Default::default(), owner: caller }.into()); } destroy { @@ -177,7 +177,7 @@ benchmarks_instance_pallet! { let witness = Asset::::get(T::AssetId::default()).unwrap().destroy_witness(); }: _(SystemOrigin::Signed(caller), Default::default(), witness) verify { - assert_last_event::(Event::Destroyed(Default::default()).into()); + assert_last_event::(Event::Destroyed { asset_id: Default::default() }.into()); } mint { @@ -185,7 +185,7 @@ benchmarks_instance_pallet! { let amount = T::Balance::from(100u32); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) verify { - assert_last_event::(Event::Issued(Default::default(), caller, amount).into()); + assert_last_event::(Event::Issued { asset_id: Default::default(), owner: caller, total_supply: amount }.into()); } burn { @@ -193,7 +193,7 @@ benchmarks_instance_pallet! { let (caller, caller_lookup) = create_default_minted_asset::(true, amount); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) verify { - assert_last_event::(Event::Burned(Default::default(), caller, amount).into()); + assert_last_event::(Event::Burned { asset_id: Default::default(), owner: caller, balance: amount }.into()); } transfer { @@ -203,7 +203,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) verify { - assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + assert_last_event::(Event::Transferred { asset_id: Default::default(), from: caller, to: target, amount }.into()); } transfer_keep_alive { @@ -215,7 +215,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) verify { assert!(frame_system::Pallet::::account_exists(&caller)); - assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + assert_last_event::(Event::Transferred { asset_id: Default::default(), from: caller, to: target, amount }.into()); } force_transfer { @@ -226,7 +226,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, target_lookup, amount) verify { assert_last_event::( - Event::Transferred(Default::default(), caller, target, amount).into() + Event::Transferred { asset_id: Default::default(), from: caller, to: target, amount }.into() ); } @@ -234,7 +234,7 @@ benchmarks_instance_pallet! { let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) verify { - assert_last_event::(Event::Frozen(Default::default(), caller).into()); + assert_last_event::(Event::Frozen { asset_id: Default::default(), who: caller }.into()); } thaw { @@ -246,14 +246,14 @@ benchmarks_instance_pallet! 
{ )?; }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) verify { - assert_last_event::(Event::Thawed(Default::default(), caller).into()); + assert_last_event::(Event::Thawed { asset_id: Default::default(), who: caller }.into()); } freeze_asset { let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); }: _(SystemOrigin::Signed(caller.clone()), Default::default()) verify { - assert_last_event::(Event::AssetFrozen(Default::default()).into()); + assert_last_event::(Event::AssetFrozen { asset_id: Default::default() }.into()); } thaw_asset { @@ -264,7 +264,7 @@ benchmarks_instance_pallet! { )?; }: _(SystemOrigin::Signed(caller.clone()), Default::default()) verify { - assert_last_event::(Event::AssetThawed(Default::default()).into()); + assert_last_event::(Event::AssetThawed { asset_id: Default::default() }.into()); } transfer_ownership { @@ -273,7 +273,7 @@ benchmarks_instance_pallet! { let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller), Default::default(), target_lookup) verify { - assert_last_event::(Event::OwnerChanged(Default::default(), target).into()); + assert_last_event::(Event::OwnerChanged { asset_id: Default::default(), owner: target }.into()); } set_team { @@ -283,12 +283,12 @@ benchmarks_instance_pallet! { let target2 = T::Lookup::unlookup(account("target", 2, SEED)); }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) verify { - assert_last_event::(Event::TeamChanged( - Default::default(), - account("target", 0, SEED), - account("target", 1, SEED), - account("target", 2, SEED), - ).into()); + assert_last_event::(Event::TeamChanged { + asset_id: Default::default(), + issuer: account("target", 0, SEED), + admin: account("target", 1, SEED), + freezer: account("target", 2, SEED), + }.into()); } set_metadata { @@ -304,7 +304,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(caller), Default::default(), name.clone(), symbol.clone(), decimals) verify { let id = Default::default(); - assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + assert_last_event::(Event::MetadataSet { asset_id: id, name, symbol, decimals, is_frozen: false }.into()); } clear_metadata { @@ -315,7 +315,7 @@ benchmarks_instance_pallet! { Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; }: _(SystemOrigin::Signed(caller), Default::default()) verify { - assert_last_event::(Event::MetadataCleared(Default::default()).into()); + assert_last_event::(Event::MetadataCleared { asset_id: Default::default() }.into()); } force_set_metadata { @@ -339,7 +339,7 @@ benchmarks_instance_pallet! { }: { call.dispatch_bypass_filter(origin)? } verify { let id = Default::default(); - assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + assert_last_event::(Event::MetadataSet { asset_id: id, name, symbol, decimals, is_frozen: false }.into()); } force_clear_metadata { @@ -353,7 +353,7 @@ benchmarks_instance_pallet! { let call = Call::::force_clear_metadata { id: Default::default() }; }: { call.dispatch_bypass_filter(origin)? } verify { - assert_last_event::(Event::MetadataCleared(Default::default()).into()); + assert_last_event::(Event::MetadataCleared { asset_id: Default::default() }.into()); } force_asset_status { @@ -372,7 +372,7 @@ benchmarks_instance_pallet! { }; }: { call.dispatch_bypass_filter(origin)? 
} verify { - assert_last_event::(Event::AssetStatusChanged(Default::default()).into()); + assert_last_event::(Event::AssetStatusChanged { asset_id: Default::default() }.into()); } approve_transfer { @@ -385,7 +385,7 @@ benchmarks_instance_pallet! { let amount = 100u32.into(); }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup, amount) verify { - assert_last_event::(Event::ApprovedTransfer(id, caller, delegate, amount).into()); + assert_last_event::(Event::ApprovedTransfer { asset_id: id, source: caller, delegate, amount }.into()); } transfer_approved { @@ -405,7 +405,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Signed(delegate.clone()), id, owner_lookup, dest_lookup, amount) verify { assert!(T::Currency::reserved_balance(&owner).is_zero()); - assert_event::(Event::Transferred(id, owner, dest, amount).into()); + assert_event::(Event::Transferred { asset_id: id, from: owner, to: dest, amount }.into()); } cancel_approval { @@ -420,7 +420,7 @@ benchmarks_instance_pallet! { Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup) verify { - assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + assert_last_event::(Event::ApprovalCancelled { asset_id: id, owner: caller, delegate }.into()); } force_cancel_approval { @@ -435,7 +435,7 @@ benchmarks_instance_pallet! { Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; }: _(SystemOrigin::Signed(caller.clone()), id, caller_lookup, delegate_lookup) verify { - assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + assert_last_event::(Event::ApprovalCancelled { asset_id: id, owner: caller, delegate }.into()); } impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test) diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index a4685d88d0497..f01954cb970ee 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -275,7 +275,11 @@ impl, I: 'static> Pallet { details.supply = details.supply.saturating_add(amount); Ok(()) })?; - Self::deposit_event(Event::Issued(id, beneficiary.clone(), amount)); + Self::deposit_event(Event::Issued { + asset_id: id, + owner: beneficiary.clone(), + total_supply: amount, + }); Ok(()) } @@ -342,7 +346,7 @@ impl, I: 'static> Pallet { Ok(()) })?; - Self::deposit_event(Event::Burned(id, target.clone(), actual)); + Self::deposit_event(Event::Burned { asset_id: id, owner: target.clone(), balance: actual }); Ok(actual) } @@ -415,7 +419,12 @@ impl, I: 'static> Pallet { ) -> Result { // Early exist if no-op. 
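// (A descriptive aside: the zero-amount fast path below still deposits a
// `Transferred` event before returning, so event consumers observe the no-op
// transfer as well.)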
if amount.is_zero() { - Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), amount)); + Self::deposit_event(Event::Transferred { + asset_id: id, + from: source.clone(), + to: dest.clone(), + amount, + }); return Ok(amount) } @@ -476,7 +485,12 @@ impl, I: 'static> Pallet { Ok(()) })?; - Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); + Self::deposit_event(Event::Transferred { + asset_id: id, + from: source.clone(), + to: dest.clone(), + amount: credit, + }); Ok(credit) } @@ -514,7 +528,7 @@ impl, I: 'static> Pallet { is_frozen: false, }, ); - Self::deposit_event(Event::ForceCreated(id, owner)); + Self::deposit_event(Event::ForceCreated { asset_id: id, owner }); Ok(()) } @@ -554,7 +568,7 @@ impl, I: 'static> Pallet { for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { T::Currency::unreserve(&owner, approval.deposit); } - Self::deposit_event(Event::Destroyed(id)); + Self::deposit_event(Event::Destroyed { asset_id: id }); Ok(DestroyWitness { accounts: details.accounts, @@ -599,7 +613,12 @@ impl, I: 'static> Pallet { }, )?; Asset::::insert(id, d); - Self::deposit_event(Event::ApprovedTransfer(id, owner.clone(), delegate.clone(), amount)); + Self::deposit_event(Event::ApprovedTransfer { + asset_id: id, + source: owner.clone(), + delegate: delegate.clone(), + amount, + }); Ok(()) } @@ -683,7 +702,13 @@ impl, I: 'static> Pallet { is_frozen: false, }); - Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, false)); + Self::deposit_event(Event::MetadataSet { + asset_id: id, + name, + symbol, + decimals, + is_frozen: false, + }); Ok(()) }) } diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index b89d411e41db8..f2bc86843ad9c 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -379,47 +379,70 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// Some asset class was created. \[asset_id, creator, owner\] - Created(T::AssetId, T::AccountId, T::AccountId), - /// Some assets were issued. \[asset_id, owner, total_supply\] - Issued(T::AssetId, T::AccountId, T::Balance), - /// Some assets were transferred. \[asset_id, from, to, amount\] - Transferred(T::AssetId, T::AccountId, T::AccountId, T::Balance), - /// Some assets were destroyed. \[asset_id, owner, balance\] - Burned(T::AssetId, T::AccountId, T::Balance), - /// The management team changed \[asset_id, issuer, admin, freezer\] - TeamChanged(T::AssetId, T::AccountId, T::AccountId, T::AccountId), - /// The owner changed \[asset_id, owner\] - OwnerChanged(T::AssetId, T::AccountId), - /// Some account `who` was frozen. \[asset_id, who\] - Frozen(T::AssetId, T::AccountId), - /// Some account `who` was thawed. \[asset_id, who\] - Thawed(T::AssetId, T::AccountId), - /// Some asset `asset_id` was frozen. \[asset_id\] - AssetFrozen(T::AssetId), - /// Some asset `asset_id` was thawed. \[asset_id\] - AssetThawed(T::AssetId), + /// Some asset class was created. + Created { asset_id: T::AssetId, creator: T::AccountId, owner: T::AccountId }, + /// Some assets were issued. + Issued { asset_id: T::AssetId, owner: T::AccountId, total_supply: T::Balance }, + /// Some assets were transferred. + Transferred { + asset_id: T::AssetId, + from: T::AccountId, + to: T::AccountId, + amount: T::Balance, + }, + /// Some assets were destroyed. + Burned { asset_id: T::AssetId, owner: T::AccountId, balance: T::Balance }, + /// The management team changed. 
+ TeamChanged { + asset_id: T::AssetId, + issuer: T::AccountId, + admin: T::AccountId, + freezer: T::AccountId, + }, + /// The owner changed. + OwnerChanged { asset_id: T::AssetId, owner: T::AccountId }, + /// Some account `who` was frozen. + Frozen { asset_id: T::AssetId, who: T::AccountId }, + /// Some account `who` was thawed. + Thawed { asset_id: T::AssetId, who: T::AccountId }, + /// Some asset `asset_id` was frozen. + AssetFrozen { asset_id: T::AssetId }, + /// Some asset `asset_id` was thawed. + AssetThawed { asset_id: T::AssetId }, /// An asset class was destroyed. - Destroyed(T::AssetId), - /// Some asset class was force-created. \[asset_id, owner\] - ForceCreated(T::AssetId, T::AccountId), - /// New metadata has been set for an asset. \[asset_id, name, symbol, decimals, is_frozen\] - MetadataSet(T::AssetId, Vec, Vec, u8, bool), - /// Metadata has been cleared for an asset. \[asset_id\] - MetadataCleared(T::AssetId), + Destroyed { asset_id: T::AssetId }, + /// Some asset class was force-created. + ForceCreated { asset_id: T::AssetId, owner: T::AccountId }, + /// New metadata has been set for an asset. + MetadataSet { + asset_id: T::AssetId, + name: Vec, + symbol: Vec, + decimals: u8, + is_frozen: bool, + }, + /// Metadata has been cleared for an asset. + MetadataCleared { asset_id: T::AssetId }, /// (Additional) funds have been approved for transfer to a destination account. - /// \[asset_id, source, delegate, amount\] - ApprovedTransfer(T::AssetId, T::AccountId, T::AccountId, T::Balance), + ApprovedTransfer { + asset_id: T::AssetId, + source: T::AccountId, + delegate: T::AccountId, + amount: T::Balance, + }, /// An approval for account `delegate` was cancelled by `owner`. - /// \[id, owner, delegate\] - ApprovalCancelled(T::AssetId, T::AccountId, T::AccountId), + ApprovalCancelled { asset_id: T::AssetId, owner: T::AccountId, delegate: T::AccountId }, /// An `amount` was transferred in its entirety from `owner` to `destination` by /// the approved `delegate`. - /// \[id, owner, delegate, destination\] - TransferredApproved(T::AssetId, T::AccountId, T::AccountId, T::AccountId, T::Balance), + TransferredApproved { + asset_id: T::AssetId, + owner: T::AccountId, + delegate: T::AccountId, + destination: T::AccountId, + amount: T::Balance, + }, /// An asset has had its attributes changed by the `Force` origin. 
- /// \[id\] - AssetStatusChanged(T::AssetId), + AssetStatusChanged { asset_id: T::AssetId }, } #[pallet::error] @@ -505,7 +528,7 @@ pub mod pallet { is_frozen: false, }, ); - Self::deposit_event(Event::Created(id, owner, admin)); + Self::deposit_event(Event::Created { asset_id: id, creator: owner, owner: admin }); Ok(()) } @@ -761,7 +784,7 @@ pub mod pallet { Account::::mutate(id, &who, |a| a.is_frozen = true); - Self::deposit_event(Event::::Frozen(id, who)); + Self::deposit_event(Event::::Frozen { asset_id: id, who }); Ok(()) } @@ -790,7 +813,7 @@ pub mod pallet { Account::::mutate(id, &who, |a| a.is_frozen = false); - Self::deposit_event(Event::::Thawed(id, who)); + Self::deposit_event(Event::::Thawed { asset_id: id, who }); Ok(()) } @@ -816,7 +839,7 @@ pub mod pallet { d.is_frozen = true; - Self::deposit_event(Event::::AssetFrozen(id)); + Self::deposit_event(Event::::AssetFrozen { asset_id: id }); Ok(()) }) } @@ -843,7 +866,7 @@ pub mod pallet { d.is_frozen = false; - Self::deposit_event(Event::::AssetThawed(id)); + Self::deposit_event(Event::::AssetThawed { asset_id: id }); Ok(()) }) } @@ -882,7 +905,7 @@ pub mod pallet { details.owner = owner.clone(); - Self::deposit_event(Event::OwnerChanged(id, owner)); + Self::deposit_event(Event::OwnerChanged { asset_id: id, owner }); Ok(()) }) } @@ -920,7 +943,7 @@ pub mod pallet { details.admin = admin.clone(); details.freezer = freezer.clone(); - Self::deposit_event(Event::TeamChanged(id, issuer, admin, freezer)); + Self::deposit_event(Event::TeamChanged { asset_id: id, issuer, admin, freezer }); Ok(()) }) } @@ -977,7 +1000,7 @@ pub mod pallet { Metadata::::try_mutate_exists(id, |metadata| { let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); - Self::deposit_event(Event::MetadataCleared(id)); + Self::deposit_event(Event::MetadataCleared { asset_id: id }); Ok(()) }) } @@ -1024,7 +1047,13 @@ pub mod pallet { is_frozen, }); - Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, is_frozen)); + Self::deposit_event(Event::MetadataSet { + asset_id: id, + name, + symbol, + decimals, + is_frozen, + }); Ok(()) }) } @@ -1051,7 +1080,7 @@ pub mod pallet { Metadata::::try_mutate_exists(id, |metadata| { let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); - Self::deposit_event(Event::MetadataCleared(id)); + Self::deposit_event(Event::MetadataCleared { asset_id: id }); Ok(()) }) } @@ -1103,7 +1132,7 @@ pub mod pallet { asset.is_frozen = is_frozen; *maybe_asset = Some(asset); - Self::deposit_event(Event::AssetStatusChanged(id)); + Self::deposit_event(Event::AssetStatusChanged { asset_id: id }); Ok(()) }) } @@ -1169,7 +1198,7 @@ pub mod pallet { d.approvals.saturating_dec(); Asset::::insert(id, d); - Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); + Self::deposit_event(Event::ApprovalCancelled { asset_id: id, owner, delegate }); Ok(()) } @@ -1211,7 +1240,7 @@ pub mod pallet { d.approvals.saturating_dec(); Asset::::insert(id, d); - Self::deposit_event(Event::ApprovalCancelled(id, owner, delegate)); + Self::deposit_event(Event::ApprovalCancelled { asset_id: id, owner, delegate }); Ok(()) } diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 5250fafaa8f9a..e24a1d45215da 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -500,7 +500,12 @@ fn transferring_less_than_one_unit_is_fine() { assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); 
assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 0)); - System::assert_last_event(mock::Event::Assets(crate::Event::Transferred(0, 1, 2, 0))); + System::assert_last_event(mock::Event::Assets(crate::Event::Transferred { + asset_id: 0, + from: 1, + to: 2, + amount: 0, + })); }); } diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 9cf92c3bd2337..4775cd06b1afd 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -218,13 +218,12 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Swap created. \[account, proof, swap\] - NewSwap(T::AccountId, HashedProof, PendingSwap), + /// Swap created. + NewSwap { account: T::AccountId, proof: HashedProof, swap: PendingSwap }, /// Swap claimed. The last parameter indicates whether the execution succeeds. - /// \[account, proof, success\] - SwapClaimed(T::AccountId, HashedProof, bool), - /// Swap cancelled. \[account, proof\] - SwapCancelled(T::AccountId, HashedProof), + SwapClaimed { account: T::AccountId, proof: HashedProof, success: bool }, + /// Swap cancelled. + SwapCancelled { account: T::AccountId, proof: HashedProof }, } /// Old name generated by `decl_event`. @@ -268,7 +267,7 @@ pub mod pallet { }; PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); - Self::deposit_event(Event::NewSwap(target, hashed_proof, swap)); + Self::deposit_event(Event::NewSwap { account: target, proof: hashed_proof, swap }); Ok(()) } @@ -304,7 +303,11 @@ pub mod pallet { PendingSwaps::::remove(target.clone(), hashed_proof.clone()); - Self::deposit_event(Event::SwapClaimed(target, hashed_proof, succeeded)); + Self::deposit_event(Event::SwapClaimed { + account: target, + proof: hashed_proof, + success: succeeded, + }); Ok(()) } @@ -333,7 +336,7 @@ pub mod pallet { swap.action.cancel(&swap.source); PendingSwaps::::remove(&target, hashed_proof.clone()); - Self::deposit_event(Event::SwapCancelled(target, hashed_proof)); + Self::deposit_event(Event::SwapCancelled { account: target, proof: hashed_proof }); Ok(()) } diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 8d74ecc9bd2d1..8be1afbe29bbe 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -168,8 +168,8 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(crate) fn deposit_event)] pub enum Event { - /// Moved an account from one bag to another. \[who, from, to\]. - Rebagged(T::AccountId, VoteWeight, VoteWeight), + /// Moved an account from one bag to another. 
+ Rebagged { who: T::AccountId, from: VoteWeight, to: VoteWeight }, } #[pallet::call] @@ -216,7 +216,7 @@ impl Pallet { let maybe_movement = list::Node::::get(&account) .and_then(|node| List::update_position_for(node, new_weight)); if let Some((from, to)) = maybe_movement { - Self::deposit_event(Event::::Rebagged(account.clone(), from, to)); + Self::deposit_event(Event::::Rebagged { who: account.clone(), from, to }); }; maybe_movement } diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index da8019583c3be..b7c64da460768 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -346,7 +346,7 @@ pub mod pallet { (account.free, account.reserved) })?; - Self::deposit_event(Event::BalanceSet(who, free, reserved)); + Self::deposit_event(Event::BalanceSet { who, free, reserved }); Ok(().into()) } @@ -454,31 +454,33 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// An account was created with some free balance. \[account, free_balance\] - Endowed(T::AccountId, T::Balance), + /// An account was created with some free balance. + Endowed { account: T::AccountId, free_balance: T::Balance }, /// An account was removed whose balance was non-zero but below ExistentialDeposit, - /// resulting in an outright loss. \[account, balance\] - DustLost(T::AccountId, T::Balance), - /// Transfer succeeded. \[from, to, value\] - Transfer(T::AccountId, T::AccountId, T::Balance), - /// A balance was set by root. \[who, free, reserved\] - BalanceSet(T::AccountId, T::Balance, T::Balance), - /// Some balance was reserved (moved from free to reserved). \[who, value\] - Reserved(T::AccountId, T::Balance), - /// Some balance was unreserved (moved from reserved to free). \[who, value\] - Unreserved(T::AccountId, T::Balance), + /// resulting in an outright loss. + DustLost { account: T::AccountId, amount: T::Balance }, + /// Transfer succeeded. + Transfer { from: T::AccountId, to: T::AccountId, amount: T::Balance }, + /// A balance was set by root. + BalanceSet { who: T::AccountId, free: T::Balance, reserved: T::Balance }, + /// Some balance was reserved (moved from free to reserved). + Reserved { who: T::AccountId, amount: T::Balance }, + /// Some balance was unreserved (moved from reserved to free). + Unreserved { who: T::AccountId, amount: T::Balance }, /// Some balance was moved from the reserve of the first account to the second account. /// Final argument indicates the destination balance type. - /// \[from, to, balance, destination_status\] - ReserveRepatriated(T::AccountId, T::AccountId, T::Balance, Status), - /// Some amount was deposited into the account (e.g. for transaction fees). \[who, - /// deposit\] - Deposit(T::AccountId, T::Balance), - /// Some amount was withdrawn from the account (e.g. for transaction fees). \[who, value\] - Withdraw(T::AccountId, T::Balance), - /// Some amount was removed from the account (e.g. for misbehavior). \[who, - /// amount_slashed\] - Slashed(T::AccountId, T::Balance), + ReserveRepatriated { + from: T::AccountId, + to: T::AccountId, + amount: T::Balance, + destination_status: Status, + }, + /// Some amount was deposited (e.g. for transaction fees). + Deposit { who: T::AccountId, amount: T::Balance }, + /// Some amount was withdrawn from the account (e.g. for transaction fees). + Withdraw { who: T::AccountId, amount: T::Balance }, + /// Some amount was removed from the account (e.g. for misbehavior). 
+ Slashed { who: T::AccountId, amount: T::Balance }, } /// Old name generated by `decl_event`. @@ -742,7 +744,7 @@ pub struct DustCleaner, I: 'static = ()>( impl, I: 'static> Drop for DustCleaner { fn drop(&mut self) { if let Some((who, dust)) = self.0.take() { - Pallet::::deposit_event(Event::DustLost(who, dust.peek())); + Pallet::::deposit_event(Event::DustLost { account: who, amount: dust.peek() }); T::DustRemoval::on_unbalanced(dust); } } @@ -939,7 +941,7 @@ impl, I: 'static> Pallet { }); result.map(|(maybe_endowed, maybe_dust, result)| { if let Some(endowed) = maybe_endowed { - Self::deposit_event(Event::Endowed(who.clone(), endowed)); + Self::deposit_event(Event::Endowed { account: who.clone(), free_balance: endowed }); } let dust_cleaner = DustCleaner(maybe_dust.map(|dust| (who.clone(), dust))); (result, dust_cleaner) @@ -1051,12 +1053,12 @@ impl, I: 'static> Pallet { }, )?; - Self::deposit_event(Event::ReserveRepatriated( - slashed.clone(), - beneficiary.clone(), - actual, - status, - )); + Self::deposit_event(Event::ReserveRepatriated { + from: slashed.clone(), + to: beneficiary.clone(), + amount: actual, + destination_status: status, + }); Ok(actual) } } @@ -1109,7 +1111,7 @@ impl, I: 'static> fungible::Mutate for Pallet { Ok(()) })?; TotalIssuance::::mutate(|t| *t += amount); - Self::deposit_event(Event::Deposit(who.clone(), amount)); + Self::deposit_event(Event::Deposit { who: who.clone(), amount }); Ok(()) } @@ -1130,7 +1132,7 @@ impl, I: 'static> fungible::Mutate for Pallet { }, )?; TotalIssuance::::mutate(|t| *t -= actual); - Self::deposit_event(Event::Withdraw(who.clone(), amount)); + Self::deposit_event(Event::Withdraw { who: who.clone(), amount }); Ok(actual) } } @@ -1151,7 +1153,11 @@ impl, I: 'static> fungible::Unbalanced for Pallet DispatchResult { Self::mutate_account(who, |account| { account.free = amount; - Self::deposit_event(Event::BalanceSet(who.clone(), account.free, account.reserved)); + Self::deposit_event(Event::BalanceSet { + who: who.clone(), + free: account.free, + reserved: account.reserved, + }); })?; Ok(()) } @@ -1531,7 +1537,11 @@ where )?; // Emit transfer event. 
- Self::deposit_event(Event::Transfer(transactor.clone(), dest.clone(), value)); + Self::deposit_event(Event::Transfer { + from: transactor.clone(), + to: dest.clone(), + amount: value, + }); Ok(()) } @@ -1595,10 +1605,10 @@ where }, ) { Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed( - who.clone(), - value.saturating_sub(not_slashed), - )); + Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(not_slashed), + }); return (imbalance, not_slashed) }, Err(_) => (), @@ -1625,7 +1635,7 @@ where |account, is_new| -> Result { ensure!(!is_new, Error::::DeadAccount); account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - Self::deposit_event(Event::Deposit(who.clone(), value)); + Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); Ok(PositiveImbalance::new(value)) }, ) @@ -1658,7 +1668,7 @@ where None => return Ok(Self::PositiveImbalance::zero()), }; - Self::deposit_event(Event::Deposit(who.clone(), value)); + Self::deposit_event(Event::Deposit { who: who.clone(), amount: value }); Ok(PositiveImbalance::new(value)) }, ) @@ -1696,7 +1706,7 @@ where account.free = new_free_account; - Self::deposit_event(Event::Withdraw(who.clone(), value)); + Self::deposit_event(Event::Withdraw { who: who.clone(), amount: value }); Ok(NegativeImbalance::new(value)) }, ) @@ -1729,7 +1739,11 @@ where SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) }; account.free = value; - Self::deposit_event(Event::BalanceSet(who.clone(), account.free, account.reserved)); + Self::deposit_event(Event::BalanceSet { + who: who.clone(), + free: account.free, + reserved: account.reserved, + }); Ok(imbalance) }, ) @@ -1773,7 +1787,7 @@ where Self::ensure_can_withdraw(&who, value.clone(), WithdrawReasons::RESERVE, account.free) })?; - Self::deposit_event(Event::Reserved(who.clone(), value)); + Self::deposit_event(Event::Reserved { who: who.clone(), amount: value }); Ok(()) } @@ -1805,7 +1819,7 @@ where }, }; - Self::deposit_event(Event::Unreserved(who.clone(), actual.clone())); + Self::deposit_event(Event::Unreserved { who: who.clone(), amount: actual.clone() }); value - actual } @@ -1846,10 +1860,10 @@ where (NegativeImbalance::new(actual), value - actual) }) { Ok((imbalance, not_slashed)) => { - Self::deposit_event(Event::Slashed( - who.clone(), - value.saturating_sub(not_slashed), - )); + Self::deposit_event(Event::Slashed { + who: who.clone(), + amount: value.saturating_sub(not_slashed), + }); return (imbalance, not_slashed) }, Err(_) => (), @@ -1992,7 +2006,7 @@ where // `actual <= to_change` and `to_change <= amount`; qed; reserves[index].amount -= actual; - Self::deposit_event(Event::Slashed(who.clone(), actual)); + Self::deposit_event(Event::Slashed { who: who.clone(), amount: actual }); (imb, value - actual) }, Err(_) => (NegativeImbalance::zero(), value), diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 6a6ebc692c34a..1f7f4dd03716d 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -314,7 +314,7 @@ macro_rules! 
decl_tests { <$ext_builder>::default().monied(true).build().execute_with(|| { assert_eq!(Balances::total_balance(&1), 10); assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); - System::assert_last_event(Event::Balances(crate::Event::Deposit(1, 10))); + System::assert_last_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 10 })); assert_eq!(Balances::total_balance(&1), 20); assert_eq!(>::get(), 120); }); @@ -342,7 +342,7 @@ macro_rules! decl_tests { fn balance_works() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 42))); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 42 })); assert_eq!(Balances::free_balance(1), 42); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::total_balance(&1), 42); @@ -444,7 +444,7 @@ macro_rules! decl_tests { let _ = Balances::withdraw( &2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive ); - System::assert_last_event(Event::Balances(crate::Event::Withdraw(2, 11))); + System::assert_last_event(Event::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); assert_eq!(Balances::free_balance(2), 100); assert_eq!(>::get(), 100); }); @@ -505,7 +505,7 @@ macro_rules! decl_tests { assert_ok!(Balances::reserve(&1, 110)); assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); System::assert_last_event( - Event::Balances(crate::Event::ReserveRepatriated(1, 2, 41, Status::Free)) + Event::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -724,18 +724,18 @@ macro_rules! decl_tests { System::set_block_number(2); assert_ok!(Balances::reserve(&1, 10)); - System::assert_last_event(Event::Balances(crate::Event::Reserved(1, 10))); + System::assert_last_event(Event::Balances(crate::Event::Reserved { who: 1, amount: 10 })); System::set_block_number(3); assert!(Balances::unreserve(&1, 5).is_zero()); - System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); + System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); System::set_block_number(4); assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 - System::assert_last_event(Event::Balances(crate::Event::Unreserved(1, 5))); + System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); }); } @@ -751,8 +751,8 @@ macro_rules! decl_tests { events(), [ Event::System(system::Event::NewAccount(1)), - Event::Balances(crate::Event::Endowed(1, 100)), - Event::Balances(crate::Event::BalanceSet(1, 100, 0)), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -763,8 +763,8 @@ macro_rules! decl_tests { events(), [ Event::System(system::Event::KilledAccount(1)), - Event::Balances(crate::Event::DustLost(1, 99)), - Event::Balances(crate::Event::Slashed(1, 1)), + Event::Balances(crate::Event::DustLost { account: 1, amount: 99 }), + Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }), ] ); }); @@ -782,8 +782,8 @@ macro_rules! 
decl_tests { events(), [ Event::System(system::Event::NewAccount(1)), - Event::Balances(crate::Event::Endowed(1, 100)), - Event::Balances(crate::Event::BalanceSet(1, 100, 0)), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -794,7 +794,7 @@ macro_rules! decl_tests { events(), [ Event::System(system::Event::KilledAccount(1)), - Event::Balances(crate::Event::Slashed(1, 100)), + Event::Balances(crate::Event::Slashed { who: 1, amount: 100 }), ] ); }); @@ -814,7 +814,7 @@ macro_rules! decl_tests { assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); - System::assert_last_event(Event::Balances(crate::Event::Slashed(1, 900))); + System::assert_last_event(Event::Balances(crate::Event::Slashed { who: 1, amount: 900 })); // SCENARIO: Slash will kill account because not enough balance left. assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index b2113a916caa5..c9de662b9e8fe 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -164,8 +164,8 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { events(), [ Event::System(system::Event::NewAccount(1)), - Event::Balances(crate::Event::Endowed(1, 100)), - Event::Balances(crate::Event::BalanceSet(1, 100, 0)), + Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -173,7 +173,7 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!(res, (NegativeImbalance::new(98), 0)); // no events - assert_eq!(events(), [Event::Balances(crate::Event::Slashed(1, 98))]); + assert_eq!(events(), [Event::Balances(crate::Event::Slashed { who: 1, amount: 98 })]); let res = Balances::slash(&1, 1); assert_eq!(res, (NegativeImbalance::new(1), 0)); @@ -182,8 +182,8 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { events(), [ Event::System(system::Event::KilledAccount(1)), - Event::Balances(crate::Event::DustLost(1, 1)), - Event::Balances(crate::Event::Slashed(1, 1)), + Event::Balances(crate::Event::DustLost { account: 1, amount: 1 }), + Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }) ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 9a5ebb003af2c..43edd16baf3b3 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -169,9 +169,16 @@ fn transfer_dust_removal_tst1_should_work() { // Verify the events assert_eq!(System::events().len(), 12); - System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450))); - System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); - System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 50))); + System::assert_has_event(Event::Balances(crate::Event::Transfer { + from: 2, + to: 3, + amount: 450, + })); + System::assert_has_event(Event::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } @@ -197,9 +204,16 @@ fn transfer_dust_removal_tst2_should_work() { // Verify the events assert_eq!(System::events().len(), 10); - System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450))); - 
System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); - System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 50))); + System::assert_has_event(Event::Balances(crate::Event::Transfer { + from: 2, + to: 1, + amount: 450, + })); + System::assert_has_event(Event::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } @@ -234,13 +248,18 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Verify the events assert_eq!(System::events().len(), 11); - System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated( - 2, - 1, - 450, - Status::Free, - ))); - System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); - System::assert_last_event(Event::Balances(crate::Event::Deposit(1, 50))); + System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated { + from: 2, + to: 1, + amount: 450, + destination_status: Status::Free, + })); + + System::assert_has_event(Event::Balances(crate::Event::DustLost { + account: 2, + amount: 50, + })); + + System::assert_last_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); }); } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 33af02fbb9ea0..341d019c49d47 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -172,7 +172,7 @@ benchmarks! { let bounty_id = BountyCount::::get() - 1; }: close_bounty(RawOrigin::Root, bounty_id) verify { - assert_last_event::(Event::BountyCanceled(bounty_id).into()) + assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } extend_bounty_expiry { @@ -184,7 +184,7 @@ benchmarks! { let curator = T::Lookup::lookup(curator_lookup).map_err(<&str>::from)?; }: _(RawOrigin::Signed(curator), bounty_id, Vec::new()) verify { - assert_last_event::(Event::BountyExtended(bounty_id).into()) + assert_last_event::(Event::BountyExtended { index: bounty_id }.into()) } spend_funds { @@ -207,7 +207,7 @@ benchmarks! { verify { ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); ensure!(missed_any == false, "Missed some"); - assert_last_event::(Event::BountyBecameActive(b - 1).into()) + assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) } impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 69380502bad3f..5c96fdcc6b98f 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -228,20 +228,20 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// New bounty proposal. \[index\] - BountyProposed(BountyIndex), - /// A bounty proposal was rejected; funds were slashed. \[index, bond\] - BountyRejected(BountyIndex, BalanceOf), - /// A bounty proposal is funded and became active. \[index\] - BountyBecameActive(BountyIndex), - /// A bounty is awarded to a beneficiary. \[index, beneficiary\] - BountyAwarded(BountyIndex, T::AccountId), - /// A bounty is claimed by beneficiary. \[index, payout, beneficiary\] - BountyClaimed(BountyIndex, BalanceOf, T::AccountId), - /// A bounty is cancelled. \[index\] - BountyCanceled(BountyIndex), - /// A bounty expiry is extended. \[index\] - BountyExtended(BountyIndex), + /// New bounty proposal. + BountyProposed { index: BountyIndex }, + /// A bounty proposal was rejected; funds were slashed. 
+ BountyRejected { index: BountyIndex, bond: BalanceOf }, + /// A bounty proposal is funded and became active. + BountyBecameActive { index: BountyIndex }, + /// A bounty is awarded to a beneficiary. + BountyAwarded { index: BountyIndex, beneficiary: T::AccountId }, + /// A bounty is claimed by beneficiary. + BountyClaimed { index: BountyIndex, payout: BalanceOf, beneficiary: T::AccountId }, + /// A bounty is cancelled. + BountyCanceled { index: BountyIndex }, + /// A bounty expiry is extended. + BountyExtended { index: BountyIndex }, } /// Number of bounty proposals that have been made. @@ -526,7 +526,7 @@ pub mod pallet { Ok(()) })?; - Self::deposit_event(Event::::BountyAwarded(bounty_id, beneficiary)); + Self::deposit_event(Event::::BountyAwarded { index: bounty_id, beneficiary }); Ok(()) } @@ -571,7 +571,11 @@ pub mod pallet { BountyDescriptions::::remove(bounty_id); - Self::deposit_event(Event::::BountyClaimed(bounty_id, payout, beneficiary)); + Self::deposit_event(Event::::BountyClaimed { + index: bounty_id, + payout, + beneficiary, + }); Ok(()) } else { Err(Error::::UnexpectedStatus.into()) @@ -612,7 +616,10 @@ pub mod pallet { T::OnSlash::on_unbalanced(imbalance); *maybe_bounty = None; - Self::deposit_event(Event::::BountyRejected(bounty_id, value)); + Self::deposit_event(Event::::BountyRejected { + index: bounty_id, + bond: value, + }); // Return early, nothing else to do. return Ok( Some(::WeightInfo::close_bounty_proposed()).into() @@ -656,7 +663,7 @@ pub mod pallet { debug_assert!(res.is_ok()); *maybe_bounty = None; - Self::deposit_event(Event::::BountyCanceled(bounty_id)); + Self::deposit_event(Event::::BountyCanceled { index: bounty_id }); Ok(Some(::WeightInfo::close_bounty_active()).into()) }, ) @@ -696,7 +703,7 @@ pub mod pallet { Ok(()) })?; - Self::deposit_event(Event::::BountyExtended(bounty_id)); + Self::deposit_event(Event::::BountyExtended { index: bounty_id }); Ok(()) } } @@ -753,7 +760,7 @@ impl Pallet { Bounties::::insert(index, &bounty); BountyDescriptions::::insert(index, description); - Self::deposit_event(Event::::BountyProposed(index)); + Self::deposit_event(Event::::BountyProposed { index }); Ok(()) } @@ -787,7 +794,7 @@ impl pallet_treasury::SpendFunds for Pallet { bounty.value, )); - Self::deposit_event(Event::::BountyBecameActive(index)); + Self::deposit_event(Event::::BountyBecameActive { index }); false } else { *missed_any = true; diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 96c09581fdd1e..344bb6495c3bc 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -398,7 +398,7 @@ fn propose_bounty_works() { assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); - assert_eq!(last_event(), BountiesEvent::BountyProposed(0)); + assert_eq!(last_event(), BountiesEvent::BountyProposed { index: 0 }); let deposit: u64 = 85 + 5; assert_eq!(Balances::reserved_balance(0), deposit); @@ -460,7 +460,7 @@ fn close_bounty_works() { let deposit: u64 = 80 + 5; - assert_eq!(last_event(), BountiesEvent::BountyRejected(0, deposit)); + assert_eq!(last_event(), BountiesEvent::BountyRejected { index: 0, bond: deposit }); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100 - deposit); @@ -692,7 +692,10 @@ fn award_and_claim_bounty_works() { assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); - assert_eq!(last_event(), BountiesEvent::BountyClaimed(0, 56, 3)); + assert_eq!( + last_event(), + BountiesEvent::BountyClaimed { index: 0, payout: 56, beneficiary: 3 } + 
); assert_eq!(Balances::free_balance(4), 14); // initial 10 + fee 4 @@ -731,7 +734,10 @@ fn claim_handles_high_fee() { assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); - assert_eq!(last_event(), BountiesEvent::BountyClaimed(0, 0, 3)); + assert_eq!( + last_event(), + BountiesEvent::BountyClaimed { index: 0, payout: 0, beneficiary: 3 } + ); assert_eq!(Balances::free_balance(4), 70); // 30 + 50 - 10 assert_eq!(Balances::free_balance(3), 0); @@ -808,7 +814,7 @@ fn award_and_cancel() { assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); assert_ok!(Bounties::close_bounty(Origin::root(), 0)); - assert_eq!(last_event(), BountiesEvent::BountyCanceled(0)); + assert_eq!(last_event(), BountiesEvent::BountyCanceled { index: 0 }); assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 0); diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index c26a2b43f5b75..5ca57cf72e8fc 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -128,7 +128,7 @@ benchmarks_instance_pallet! { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - Event::MemberExecuted(proposal_hash, Err(DispatchError::BadOrigin)).into() + Event::MemberExecuted { proposal_hash, result: Err(DispatchError::BadOrigin) }.into() ); } @@ -159,7 +159,7 @@ benchmarks_instance_pallet! { let proposal_hash = T::Hashing::hash_of(&proposal); // Note that execution fails due to mis-matched origin assert_last_event::( - Event::Executed(proposal_hash, Err(DispatchError::BadOrigin)).into() + Event::Executed { proposal_hash, result: Err(DispatchError::BadOrigin) }.into() ); } @@ -203,7 +203,7 @@ benchmarks_instance_pallet! { // New proposal is recorded assert_eq!(Collective::::proposals().len(), p as usize); let proposal_hash = T::Hashing::hash_of(&proposal); - assert_last_event::(Event::Proposed(caller, p - 1, proposal_hash, threshold).into()); + assert_last_event::(Event::Proposed { account: caller, proposal_index: p - 1, proposal_hash, threshold }.into()); } vote { @@ -359,7 +359,7 @@ benchmarks_instance_pallet! { verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved(last_hash).into()); + assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } close_early_approved { @@ -440,7 +440,7 @@ benchmarks_instance_pallet! { verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); + assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); } close_disapproved { @@ -514,7 +514,7 @@ benchmarks_instance_pallet! { }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved(last_hash).into()); + assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } close_approved { @@ -586,7 +586,7 @@ benchmarks_instance_pallet! 
{ }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::max_value(), bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Executed(last_hash, Err(DispatchError::BadOrigin)).into()); + assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); } disapprove_proposal { @@ -634,7 +634,7 @@ benchmarks_instance_pallet! { }: _(SystemOrigin::Root, last_hash) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); - assert_last_event::(Event::Disapproved(last_hash).into()); + assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); } impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 2797d01ffcdba..26b18b6232577 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -279,27 +279,31 @@ pub mod pallet { pub enum Event, I: 'static = ()> { /// A motion (given hash) has been proposed (by given account) with a threshold (given /// `MemberCount`). - /// \[account, proposal_index, proposal_hash, threshold\] - Proposed(T::AccountId, ProposalIndex, T::Hash, MemberCount), + Proposed { + account: T::AccountId, + proposal_index: ProposalIndex, + proposal_hash: T::Hash, + threshold: MemberCount, + }, /// A motion (given hash) has been voted on by given account, leaving /// a tally (yes votes and no votes given respectively as `MemberCount`). - /// \[account, proposal_hash, voted, yes, no\] - Voted(T::AccountId, T::Hash, bool, MemberCount, MemberCount), + Voted { + account: T::AccountId, + proposal_hash: T::Hash, + voted: bool, + yes: MemberCount, + no: MemberCount, + }, /// A motion was approved by the required threshold. - /// \[proposal_hash\] - Approved(T::Hash), + Approved { proposal_hash: T::Hash }, /// A motion was not approved by the required threshold. - /// \[proposal_hash\] - Disapproved(T::Hash), + Disapproved { proposal_hash: T::Hash }, /// A motion was executed; result will be `Ok` if it returned without error. - /// \[proposal_hash, result\] - Executed(T::Hash, DispatchResult), + Executed { proposal_hash: T::Hash, result: DispatchResult }, /// A single member did some action; result will be `Ok` if it returned without error. - /// \[proposal_hash, result\] - MemberExecuted(T::Hash, DispatchResult), + MemberExecuted { proposal_hash: T::Hash, result: DispatchResult }, /// A proposal was closed because its threshold was reached or after its duration was up. - /// \[proposal_hash, yes, no\] - Closed(T::Hash, MemberCount, MemberCount), + Closed { proposal_hash: T::Hash, yes: MemberCount, no: MemberCount }, } /// Old name generated by `decl_event`. 
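The hunks above convert pallet-collective's events from positional tuple variants to struct variants with named fields; the hunks below migrate the emitting call sites and tests in the same way. As a minimal, self-contained sketch of what the change buys consumers (illustrative only, not part of the patch: the trimmed-down `Event` enum below and the `u64`/`[u8; 32]` types are stand-ins for the pallet's generic `T::AccountId`/`T::Hash`), a `match` arm over a struct variant binds fields by name and can elide the rest with `..`:

#[allow(dead_code)]
enum Event {
    // Stand-in for the `Voted` variant after this change.
    Voted { account: u64, proposal_hash: [u8; 32], voted: bool, yes: u32, no: u32 },
}

fn ayes(event: &Event) -> u32 {
    match event {
        // Named bindings replace positional ones; fields this arm does not
        // need are elided with `..`, so adding a field later cannot silently
        // rebind the wrong position.
        Event::Voted { yes, .. } => *yes,
    }
}

fn main() {
    let e = Event::Voted { account: 1, proposal_hash: [0; 32], voted: true, yes: 2, no: 0 };
    assert_eq!(ayes(&e), 2);
}

With the old tuple variant, the equivalent arm was `Event::Voted(_, _, _, yes, _)`: a reordering of the event's fields would still type-check there while changing the meaning, which is exactly what the named-field form rules out.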
@@ -442,10 +446,10 @@ pub mod pallet { let proposal_hash = T::Hashing::hash_of(&proposal); let result = proposal.dispatch(RawOrigin::Member(who).into()); - Self::deposit_event(Event::MemberExecuted( + Self::deposit_event(Event::MemberExecuted { proposal_hash, - result.map(|_| ()).map_err(|e| e.error), - )); + result: result.map(|_| ()).map_err(|e| e.error), + }); Ok(get_result_weight(result) .map(|w| { @@ -521,10 +525,10 @@ pub mod pallet { if threshold < 2 { let seats = Self::members().len() as MemberCount; let result = proposal.dispatch(RawOrigin::Members(1, seats).into()); - Self::deposit_event(Event::Executed( + Self::deposit_event(Event::Executed { proposal_hash, - result.map(|_| ()).map_err(|e| e.error), - )); + result: result.map(|_| ()).map_err(|e| e.error), + }); Ok(get_result_weight(result) .map(|w| { @@ -552,7 +556,12 @@ pub mod pallet { }; >::insert(proposal_hash, votes); - Self::deposit_event(Event::Proposed(who, index, proposal_hash, threshold)); + Self::deposit_event(Event::Proposed { + account: who, + proposal_index: index, + proposal_hash, + threshold, + }); Ok(Some(T::WeightInfo::propose_proposed( proposal_len as u32, // B @@ -620,7 +629,13 @@ pub mod pallet { let yes_votes = voting.ayes.len() as MemberCount; let no_votes = voting.nays.len() as MemberCount; - Self::deposit_event(Event::Voted(who, proposal, approve, yes_votes, no_votes)); + Self::deposit_event(Event::Voted { + account: who, + proposal_hash: proposal, + voted: approve, + yes: yes_votes, + no: no_votes, + }); Voting::::insert(&proposal, voting); @@ -701,7 +716,7 @@ pub mod pallet { length_bound, proposal_weight_bound, )?; - Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); return Ok(( @@ -713,7 +728,7 @@ pub mod pallet { ) .into()) } else if disapproved { - Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let proposal_count = Self::do_disapprove_proposal(proposal_hash); return Ok(( Some(T::WeightInfo::close_early_disapproved(seats, proposal_count)), @@ -746,7 +761,7 @@ pub mod pallet { length_bound, proposal_weight_bound, )?; - Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let (proposal_weight, proposal_count) = Self::do_approve_proposal(seats, yes_votes, proposal_hash, proposal); Ok(( @@ -758,7 +773,7 @@ pub mod pallet { ) .into()) } else { - Self::deposit_event(Event::Closed(proposal_hash, yes_votes, no_votes)); + Self::deposit_event(Event::Closed { proposal_hash, yes: yes_votes, no: no_votes }); let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok((Some(T::WeightInfo::close_disapproved(seats, proposal_count)), Pays::No).into()) } @@ -848,15 +863,15 @@ impl, I: 'static> Pallet { proposal_hash: T::Hash, proposal: >::Proposal, ) -> (Weight, u32) { - Self::deposit_event(Event::Approved(proposal_hash)); + Self::deposit_event(Event::Approved { proposal_hash }); let dispatch_weight = proposal.get_dispatch_info().weight; let origin = RawOrigin::Members(yes_votes, seats).into(); let result = proposal.dispatch(origin); - Self::deposit_event(Event::Executed( + Self::deposit_event(Event::Executed { proposal_hash, - result.map(|_| ()).map_err(|e| e.error), - )); + result: 
result.map(|_| ()).map_err(|e| e.error), + }); // default to the dispatch info weight for safety let proposal_weight = get_result_weight(result).unwrap_or(dispatch_weight); // P1 @@ -866,7 +881,7 @@ impl, I: 'static> Pallet { fn do_disapprove_proposal(proposal_hash: T::Hash) -> u32 { // disapproved - Self::deposit_event(Event::Disapproved(proposal_hash)); + Self::deposit_event(Event::Disapproved { proposal_hash }); Self::remove_proposal(proposal_hash) } diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index b8feb64867cf8..7e52b10a9b1d6 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -216,11 +216,32 @@ fn close_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Closed(hash, 2, 1))), - record(Event::Collective(CollectiveEvent::Disapproved(hash))) + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 2, + no: 1 + })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ] ); }); @@ -315,11 +336,32 @@ fn close_with_prime_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Closed(hash, 2, 1))), - record(Event::Collective(CollectiveEvent::Disapproved(hash))) + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 2, + no: 1 + })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ] ); }); @@ -354,15 +396,36 @@ fn close_with_voting_prime_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Closed(hash, 3, 0))), - record(Event::Collective(CollectiveEvent::Approved(hash))), - record(Event::Collective(CollectiveEvent::Executed( - hash, - Err(DispatchError::BadOrigin) - ))) + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + 
proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 3, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { + proposal_hash: hash, + result: Err(DispatchError::BadOrigin) + })) ] ); }); @@ -404,16 +467,45 @@ fn close_with_no_prime_but_majority_works() { assert_eq!( System::events(), vec![ - record(Event::CollectiveMajority(CollectiveEvent::Proposed(1, 0, hash, 5))), - record(Event::CollectiveMajority(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::CollectiveMajority(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::CollectiveMajority(CollectiveEvent::Voted(3, hash, true, 3, 0))), - record(Event::CollectiveMajority(CollectiveEvent::Closed(hash, 5, 0))), - record(Event::CollectiveMajority(CollectiveEvent::Approved(hash))), - record(Event::CollectiveMajority(CollectiveEvent::Executed( - hash, - Err(DispatchError::BadOrigin) - ))) + record(Event::CollectiveMajority(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 5 + })), + record(Event::CollectiveMajority(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::CollectiveMajority(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::CollectiveMajority(CollectiveEvent::Voted { + account: 3, + proposal_hash: hash, + voted: true, + yes: 3, + no: 0 + })), + record(Event::CollectiveMajority(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 5, + no: 0 + })), + record(Event::CollectiveMajority(CollectiveEvent::Approved { + proposal_hash: hash + })), + record(Event::CollectiveMajority(CollectiveEvent::Executed { + proposal_hash: hash, + result: Err(DispatchError::BadOrigin) + })) ] ); }); @@ -537,7 +629,12 @@ fn propose_works() { assert_eq!( System::events(), - vec![record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3)))] + vec![record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + }))] ); }); } @@ -696,9 +793,26 @@ fn motions_vote_after_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, false, 0, 1))), + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 2 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: false, + yes: 0, + no: 1 + })), ] ); }); @@ -812,15 +926,36 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Closed(hash, 2, 0))), - record(Event::Collective(CollectiveEvent::Approved(hash))), - record(Event::Collective(CollectiveEvent::Executed( - hash, - Err(DispatchError::BadOrigin) - ))), + 
record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 2 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { + proposal_hash: hash, + result: Err(DispatchError::BadOrigin) + })), ] ); @@ -840,14 +975,44 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 1, hash, 2))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Voted(3, hash, true, 3, 0))), - record(Event::Collective(CollectiveEvent::Closed(hash, 3, 0))), - record(Event::Collective(CollectiveEvent::Approved(hash))), + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 1, + proposal_hash: hash, + threshold: 2 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 3, + proposal_hash: hash, + voted: true, + yes: 3, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 3, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), record(Event::Democracy(mock_democracy::pallet::Event::::ExternalProposed)), - record(Event::Collective(CollectiveEvent::Executed(hash, Ok(())))), + record(Event::Collective(CollectiveEvent::Executed { + proposal_hash: hash, + result: Ok(()) + })), ] ); }); @@ -873,11 +1038,32 @@ fn motions_disapproval_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, false, 1, 1))), - record(Event::Collective(CollectiveEvent::Closed(hash, 1, 1))), - record(Event::Collective(CollectiveEvent::Disapproved(hash))), + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: false, + yes: 1, + no: 1 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 1, + no: 1 + })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), ] ); }); @@ -903,15 +1089,36 @@ fn motions_approval_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), 
- record(Event::Collective(CollectiveEvent::Closed(hash, 2, 0))), - record(Event::Collective(CollectiveEvent::Approved(hash))), - record(Event::Collective(CollectiveEvent::Executed( - hash, - Err(DispatchError::BadOrigin) - ))), + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 2 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(Event::Collective(CollectiveEvent::Executed { + proposal_hash: hash, + result: Err(DispatchError::BadOrigin) + })), ] ); }); @@ -932,7 +1139,12 @@ fn motion_with_no_votes_closes_with_disapproval() { )); assert_eq!( System::events()[0], - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 3))) + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 3 + })) ); // Closing the motion too early is not possible because it has neither @@ -951,11 +1163,15 @@ fn motion_with_no_votes_closes_with_disapproval() { // Events show that the close ended in a disapproval. assert_eq!( System::events()[1], - record(Event::Collective(CollectiveEvent::Closed(hash, 0, 3))) + record(Event::Collective(CollectiveEvent::Closed { + proposal_hash: hash, + yes: 0, + no: 3 + })) ); assert_eq!( System::events()[2], - record(Event::Collective(CollectiveEvent::Disapproved(hash))) + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ); }) } @@ -1015,10 +1231,27 @@ fn disapprove_proposal_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed(1, 0, hash, 2))), - record(Event::Collective(CollectiveEvent::Voted(1, hash, true, 1, 0))), - record(Event::Collective(CollectiveEvent::Voted(2, hash, true, 2, 0))), - record(Event::Collective(CollectiveEvent::Disapproved(hash))), + record(Event::Collective(CollectiveEvent::Proposed { + account: 1, + proposal_index: 0, + proposal_hash: hash, + threshold: 2 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 1, + proposal_hash: hash, + voted: true, + yes: 1, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Voted { + account: 2, + proposal_hash: hash, + voted: true, + yes: 2, + no: 0 + })), + record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), ] ); }) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index bd5dbae5b34a6..2214ce8f40b1a 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -447,7 +447,10 @@ fn instantiate_and_call_and_deposit_event() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Deposit(ALICE, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Deposit { + who: ALICE, + amount: 1_000_000 + }), topics: vec![], }, EventRecord { @@ -457,7 +460,10 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed(ALICE, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Endowed { + account: ALICE, + free_balance: 1_000_000 + }), topics: vec![], }, EventRecord { @@ 
-467,19 +473,19 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed( - addr.clone(), - subsistence * 100 - )), + event: Event::Balances(pallet_balances::Event::Endowed { + account: addr.clone(), + free_balance: subsistence * 100 + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer( - ALICE, - addr.clone(), - subsistence * 100 - )), + event: Event::Balances(pallet_balances::Event::Transfer { + from: ALICE, + to: addr.clone(), + amount: subsistence * 100 + }), topics: vec![], }, EventRecord { @@ -766,11 +772,11 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer( - addr.clone(), - DJANGO, - 100_000, - )), + event: Event::Balances(pallet_balances::Event::Transfer { + from: addr.clone(), + to: DJANGO, + amount: 100_000, + }), topics: vec![], }, EventRecord { diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 34bcb0da301e6..136c2d2a7c9e2 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -774,7 +774,7 @@ benchmarks! { }: enact_proposal(RawOrigin::Root, proposal_hash, 0) verify { // Fails due to mismatched origin - assert_last_event::(Event::::Executed(0, Err(BadOrigin.into())).into()); + assert_last_event::(Event::::Executed { ref_index: 0, result: Err(BadOrigin.into()) }.into()); } #[extra] diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 893e4676bef7b..529bcebc8e374 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -507,45 +507,45 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A motion has been proposed by a public account. \[proposal_index, deposit\] - Proposed(PropIndex, BalanceOf), - /// A public proposal has been tabled for referendum vote. \[proposal_index, deposit, - /// depositors\] - Tabled(PropIndex, BalanceOf, Vec), + /// A motion has been proposed by a public account. + Proposed { proposal_index: PropIndex, deposit: BalanceOf }, + /// A public proposal has been tabled for referendum vote. + Tabled { proposal_index: PropIndex, deposit: BalanceOf, depositors: Vec }, /// An external proposal has been tabled. ExternalTabled, - /// A referendum has begun. \[ref_index, threshold\] - Started(ReferendumIndex, VoteThreshold), - /// A proposal has been approved by referendum. \[ref_index\] - Passed(ReferendumIndex), - /// A proposal has been rejected by referendum. \[ref_index\] - NotPassed(ReferendumIndex), - /// A referendum has been cancelled. \[ref_index\] - Cancelled(ReferendumIndex), - /// A proposal has been enacted. \[ref_index, result\] - Executed(ReferendumIndex, DispatchResult), - /// An account has delegated their vote to another account. \[who, target\] - Delegated(T::AccountId, T::AccountId), - /// An \[account\] has cancelled a previous delegation operation. - Undelegated(T::AccountId), - /// An external proposal has been vetoed. \[who, proposal_hash, until\] - Vetoed(T::AccountId, T::Hash, T::BlockNumber), - /// A proposal's preimage was noted, and the deposit taken. \[proposal_hash, who, deposit\] - PreimageNoted(T::Hash, T::AccountId, BalanceOf), + /// A referendum has begun. + Started { ref_index: ReferendumIndex, threshold: VoteThreshold }, + /// A proposal has been approved by referendum. 
+ Passed { ref_index: ReferendumIndex }, + /// A proposal has been rejected by referendum. + NotPassed { ref_index: ReferendumIndex }, + /// A referendum has been cancelled. + Cancelled { ref_index: ReferendumIndex }, + /// A proposal has been enacted. + Executed { ref_index: ReferendumIndex, result: DispatchResult }, + /// An account has delegated their vote to another account. + Delegated { who: T::AccountId, target: T::AccountId }, + /// An account has cancelled a previous delegation operation. + Undelegated { account: T::AccountId }, + /// An external proposal has been vetoed. + Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: T::BlockNumber }, + /// A proposal's preimage was noted, and the deposit taken. + PreimageNoted { proposal_hash: T::Hash, who: T::AccountId, deposit: BalanceOf }, /// A proposal preimage was removed and used (the deposit was returned). - /// \[proposal_hash, provider, deposit\] - PreimageUsed(T::Hash, T::AccountId, BalanceOf), + PreimageUsed { proposal_hash: T::Hash, provider: T::AccountId, deposit: BalanceOf }, /// A proposal could not be executed because its preimage was invalid. - /// \[proposal_hash, ref_index\] - PreimageInvalid(T::Hash, ReferendumIndex), + PreimageInvalid { proposal_hash: T::Hash, ref_index: ReferendumIndex }, /// A proposal could not be executed because its preimage was missing. - /// \[proposal_hash, ref_index\] - PreimageMissing(T::Hash, ReferendumIndex), + PreimageMissing { proposal_hash: T::Hash, ref_index: ReferendumIndex }, /// A registered preimage was removed and the deposit collected by the reaper. - /// \[proposal_hash, provider, deposit, reaper\] - PreimageReaped(T::Hash, T::AccountId, BalanceOf, T::AccountId), - /// A proposal \[hash\] has been blacklisted permanently. - Blacklisted(T::Hash), + PreimageReaped { + proposal_hash: T::Hash, + provider: T::AccountId, + deposit: BalanceOf, + reaper: T::AccountId, + }, + /// A proposal_hash has been blacklisted permanently. + Blacklisted { proposal_hash: T::Hash }, } #[pallet::error] @@ -657,7 +657,7 @@ pub mod pallet { >::append((index, proposal_hash, who)); - Self::deposit_event(Event::::Proposed(index, value)); + Self::deposit_event(Event::::Proposed { proposal_index: index, deposit: value }); Ok(()) } @@ -882,7 +882,7 @@ pub mod pallet { let until = >::block_number() + T::CooloffPeriod::get(); >::insert(&proposal_hash, (until, existing_vetoers)); - Self::deposit_event(Event::::Vetoed(who, proposal_hash, until)); + Self::deposit_event(Event::::Vetoed { who, proposal_hash, until }); >::kill(); Ok(()) } @@ -1104,7 +1104,12 @@ pub mod pallet { T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); debug_assert!(res.is_ok()); >::remove(&proposal_hash); - Self::deposit_event(Event::::PreimageReaped(proposal_hash, provider, deposit, who)); + Self::deposit_event(Event::::PreimageReaped { + proposal_hash, + provider, + deposit, + reaper: who, + }); Ok(()) } @@ -1249,7 +1254,7 @@ pub mod pallet { } } - Self::deposit_event(Event::::Blacklisted(proposal_hash)); + Self::deposit_event(Event::::Blacklisted { proposal_hash }); Ok(()) } @@ -1330,7 +1335,7 @@ impl Pallet { /// Remove a referendum. 
pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { - Self::deposit_event(Event::::Cancelled(ref_index)); + Self::deposit_event(Event::::Cancelled { ref_index }); ReferendumInfoOf::::remove(ref_index); } @@ -1532,7 +1537,7 @@ impl Pallet { T::Currency::extend_lock(DEMOCRACY_ID, &who, balance, WithdrawReasons::TRANSFER); Ok(votes) })?; - Self::deposit_event(Event::::Delegated(who, target)); + Self::deposit_event(Event::::Delegated { who, target }); Ok(votes) } @@ -1558,7 +1563,7 @@ impl Pallet { Voting::Direct { .. } => Err(Error::::NotDelegating.into()), } })?; - Self::deposit_event(Event::::Undelegated(who)); + Self::deposit_event(Event::::Undelegated { account: who }); Ok(votes) } @@ -1589,7 +1594,7 @@ impl Pallet { ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); - Self::deposit_event(Event::::Started(ref_index, threshold)); + Self::deposit_event(Event::::Started { ref_index, threshold }); ref_index } @@ -1635,7 +1640,11 @@ impl Pallet { for d in &depositors { T::Currency::unreserve(d, deposit); } - Self::deposit_event(Event::::Tabled(prop_index, deposit, depositors)); + Self::deposit_event(Event::::Tabled { + proposal_index: prop_index, + deposit, + depositors, + }); Self::inject_referendum( now + T::VotingPeriod::get(), proposal, @@ -1655,22 +1664,25 @@ impl Pallet { if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { let err_amount = T::Currency::unreserve(&provider, deposit); debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::::PreimageUsed(proposal_hash, provider, deposit)); + Self::deposit_event(Event::::PreimageUsed { proposal_hash, provider, deposit }); let res = proposal .dispatch(frame_system::RawOrigin::Root.into()) .map(|_| ()) .map_err(|e| e.error); - Self::deposit_event(Event::::Executed(index, res)); + Self::deposit_event(Event::::Executed { ref_index: index, result: res }); Ok(()) } else { T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); - Self::deposit_event(Event::::PreimageInvalid(proposal_hash, index)); + Self::deposit_event(Event::::PreimageInvalid { + proposal_hash, + ref_index: index, + }); Err(Error::::PreimageInvalid.into()) } } else { - Self::deposit_event(Event::::PreimageMissing(proposal_hash, index)); + Self::deposit_event(Event::::PreimageMissing { proposal_hash, ref_index: index }); Err(Error::::PreimageMissing.into()) } } @@ -1684,7 +1696,7 @@ impl Pallet { let approved = status.threshold.approved(status.tally, total_issuance); if approved { - Self::deposit_event(Event::::Passed(index)); + Self::deposit_event(Event::::Passed { ref_index: index }); if status.delay.is_zero() { let _ = Self::do_enact_proposal(status.proposal_hash, index); } else { @@ -1713,7 +1725,7 @@ impl Pallet { } } } else { - Self::deposit_event(Event::::NotPassed(index)); + Self::deposit_event(Event::::NotPassed { ref_index: index }); } approved @@ -1870,7 +1882,7 @@ impl Pallet { }; >::insert(proposal_hash, a); - Self::deposit_event(Event::::PreimageNoted(proposal_hash, who, deposit)); + Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit }); Ok(()) } @@ -1896,7 +1908,7 @@ impl Pallet { }; >::insert(proposal_hash, a); - Self::deposit_event(Event::::PreimageNoted(proposal_hash, who, free)); + Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit: free }); Ok(()) } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 
4c4de82af592f..cdf5a2098d6bb 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -886,10 +886,10 @@ pub mod pallet { log!(info, "queued unsigned solution with score {:?}", ready.score); let ejected_a_solution = >::exists(); >::put(ready); - Self::deposit_event(Event::SolutionStored( - ElectionCompute::Unsigned, - ejected_a_solution, - )); + Self::deposit_event(Event::SolutionStored { + election_compute: ElectionCompute::Unsigned, + prev_ejected: ejected_a_solution, + }); Ok(None.into()) } @@ -1012,7 +1012,10 @@ pub mod pallet { } signed_submissions.put(); - Self::deposit_event(Event::SolutionStored(ElectionCompute::Signed, ejected_a_solution)); + Self::deposit_event(Event::SolutionStored { + election_compute: ElectionCompute::Signed, + prev_ejected: ejected_a_solution, + }); Ok(()) } } @@ -1026,18 +1029,18 @@ pub mod pallet { /// solution is unsigned, this means that it has also been processed. /// /// The `bool` is `true` when a previous solution was ejected to make room for this one. - SolutionStored(ElectionCompute, bool), + SolutionStored { election_compute: ElectionCompute, prev_ejected: bool }, /// The election has been finalized, with `Some` of the given computation, or else if the /// election failed, `None`. - ElectionFinalized(Option), + ElectionFinalized { election_compute: Option }, /// An account has been rewarded for their signed submission being finalized. - Rewarded(::AccountId, BalanceOf), + Rewarded { account: ::AccountId, value: BalanceOf }, /// An account has been slashed for submitting an invalid signed submission. - Slashed(::AccountId, BalanceOf), + Slashed { account: ::AccountId, value: BalanceOf }, /// The signed phase of the given round has started. - SignedPhaseStarted(u32), + SignedPhaseStarted { round: u32 }, /// The unsigned phase of the given round has started. - UnsignedPhaseStarted(u32), + UnsignedPhaseStarted { round: u32 }, } /// Error of the pallet that can be returned in response to dispatches. @@ -1245,7 +1248,7 @@ impl Pallet { pub fn on_initialize_open_signed() { log!(info, "Starting signed phase round {}.", Self::round()); >::put(Phase::Signed); - Self::deposit_event(Event::SignedPhaseStarted(Self::round())); + Self::deposit_event(Event::SignedPhaseStarted { round: Self::round() }); } /// Logic for [`>::on_initialize`] when unsigned phase is being opened. @@ -1253,7 +1256,7 @@ impl Pallet { let round = Self::round(); log!(info, "Starting unsigned phase round {} enabled {}.", round, enabled); >::put(Phase::Unsigned((enabled, now))); - Self::deposit_event(Event::UnsignedPhaseStarted(round)); + Self::deposit_event(Event::UnsignedPhaseStarted { round }); } /// Parts of [`create_snapshot`] that happen inside of this pallet. @@ -1473,14 +1476,14 @@ impl Pallet { |ReadySolution { supports, compute, .. }| Ok((supports, compute)), ) .map(|(supports, compute)| { - Self::deposit_event(Event::ElectionFinalized(Some(compute))); + Self::deposit_event(Event::ElectionFinalized { election_compute: Some(compute) }); if Self::round() != 1 { log!(info, "Finalized election round with compute {:?}.", compute); } supports }) .map_err(|err| { - Self::deposit_event(Event::ElectionFinalized(None)); + Self::deposit_event(Event::ElectionFinalized { election_compute: None }); if Self::round() != 1 { log!(warn, "Failed to finalize election round. 
reason {:?}", err); } @@ -1737,7 +1740,7 @@ mod tests { roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); - assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert!(MultiPhase::snapshot().is_some()); assert_eq!(MultiPhase::round(), 1); @@ -1750,7 +1753,10 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert_eq!( multi_phase_events(), - vec![Event::SignedPhaseStarted(1), Event::UnsignedPhaseStarted(1)], + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 } + ], ); assert!(MultiPhase::snapshot().is_some()); @@ -1861,7 +1867,7 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); roll_to(15); - assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); @@ -1873,8 +1879,8 @@ mod tests { assert_eq!( multi_phase_events(), vec![ - Event::SignedPhaseStarted(1), - Event::ElectionFinalized(Some(ElectionCompute::Fallback)) + Event::SignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { election_compute: Some(ElectionCompute::Fallback) } ], ); // All storage items must be cleared. @@ -1896,7 +1902,7 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); roll_to(15); - assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 61215059c53a6..b762ad706486c 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -429,7 +429,7 @@ impl Pallet { >::put(ready_solution); // emit reward event - Self::deposit_event(crate::Event::Rewarded(who.clone(), reward)); + Self::deposit_event(crate::Event::Rewarded { account: who.clone(), value: reward }); // unreserve deposit. let _remaining = T::Currency::unreserve(who, deposit); @@ -446,7 +446,7 @@ impl Pallet { /// /// Infallible pub fn finalize_signed_phase_reject_solution(who: &T::AccountId, deposit: BalanceOf) { - Self::deposit_event(crate::Event::Slashed(who.clone(), deposit)); + Self::deposit_event(crate::Event::Slashed { account: who.clone(), value: deposit }); let (negative_imbalance, _remaining) = T::Currency::slash_reserved(who, deposit); debug_assert!(_remaining.is_zero()); T::SlashHandler::on_unbalanced(negative_imbalance); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index d7b42383da757..116c0937bf983 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -425,7 +425,7 @@ pub mod pallet { Renouncing::Member => { let _ = Self::remove_and_replace_member(&who, false) .map_err(|_| Error::::InvalidRenouncing)?; - Self::deposit_event(Event::Renounced(who)); + Self::deposit_event(Event::Renounced { candidate: who }); }, Renouncing::RunnerUp => { >::try_mutate::<_, Error, _>(|runners_up| { @@ -437,7 +437,7 @@ pub mod pallet { let SeatHolder { deposit, .. 
 } = runners_up.remove(index);
 let _remainder = T::Currency::unreserve(&who, deposit);
 debug_assert!(_remainder.is_zero());
- Self::deposit_event(Event::Renounced(who));
+ Self::deposit_event(Event::Renounced { candidate: who });
 Ok(())
 })?;
 },
@@ -450,7 +450,7 @@ pub mod pallet {
 let (_removed, deposit) = candidates.remove(index);
 let _remainder = T::Currency::unreserve(&who, deposit);
 debug_assert!(_remainder.is_zero());
- Self::deposit_event(Event::Renounced(who));
+ Self::deposit_event(Event::Renounced { candidate: who });
 Ok(())
 })?;
 },
@@ -496,7 +496,7 @@ pub mod pallet {
 let had_replacement = Self::remove_and_replace_member(&who, true)?;
 debug_assert_eq!(has_replacement, had_replacement);
- Self::deposit_event(Event::MemberKicked(who.clone()));
+ Self::deposit_event(Event::MemberKicked { member: who.clone() });

 if !had_replacement {
 Self::do_phragmen();
@@ -534,29 +534,32 @@ pub mod pallet {
 #[pallet::event]
 #[pallet::generate_deposit(pub(super) fn deposit_event)]
 pub enum Event<T: Config> {
- /// A new term with \[new_members\]. This indicates that enough candidates existed to run
+ /// A new term with `new_members`. This indicates that enough candidates existed to run
 /// the election, not that enough have been elected. The inner value must be examined
 /// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond
 /// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to
 /// begin with.
- NewTerm(Vec<(<T as frame_system::Config>::AccountId, BalanceOf<T>)>),
+ NewTerm { new_members: Vec<(<T as frame_system::Config>::AccountId, BalanceOf<T>)> },
 /// No (or not enough) candidates existed for this round. This is different from
 /// `NewTerm(\[\])`. See the description of `NewTerm`.
 EmptyTerm,
 /// Internal error happened while trying to perform election.
 ElectionError,
- /// A \[member\] has been removed. This should always be followed by either `NewTerm` or
+ /// A member has been removed. This should always be followed by either `NewTerm` or
 /// `EmptyTerm`.
- MemberKicked(<T as frame_system::Config>::AccountId),
+ MemberKicked { member: <T as frame_system::Config>::AccountId },
 /// Someone has renounced their candidacy.
- Renounced(<T as frame_system::Config>::AccountId),
- /// A \[candidate\] was slashed by \[amount\] due to failing to obtain a seat as member or
+ Renounced { candidate: <T as frame_system::Config>::AccountId },
+ /// A candidate was slashed by `amount` due to failing to obtain a seat as member or
 /// runner-up.
 ///
 /// Note that old members and runners-up are also candidates.
- CandidateSlashed(<T as frame_system::Config>::AccountId, BalanceOf<T>),
- /// A \[seat holder\] was slashed by \[amount\] by being forcefully removed from the set.
- SeatHolderSlashed(<T as frame_system::Config>::AccountId, BalanceOf<T>),
+ CandidateSlashed { candidate: <T as frame_system::Config>::AccountId, amount: BalanceOf<T> },
+ /// A seat holder was slashed by `amount` by being forcefully removed from the set.
+ SeatHolderSlashed { + seat_holder: ::AccountId, + amount: BalanceOf, + }, } #[deprecated(note = "use `Event` instead")] @@ -748,7 +751,10 @@ impl Pallet { let (imbalance, _remainder) = T::Currency::slash_reserved(who, removed.deposit); debug_assert!(_remainder.is_zero()); T::LoserCandidate::on_unbalanced(imbalance); - Self::deposit_event(Event::SeatHolderSlashed(who.clone(), removed.deposit)); + Self::deposit_event(Event::SeatHolderSlashed { + seat_holder: who.clone(), + amount: removed.deposit, + }); } else { T::Currency::unreserve(who, removed.deposit); } @@ -1001,7 +1007,10 @@ impl Pallet { { let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); - Self::deposit_event(Event::CandidateSlashed(c.clone(), *d)); + Self::deposit_event(Event::CandidateSlashed { + candidate: c.clone(), + amount: *d, + }); } }); @@ -1041,7 +1050,7 @@ impl Pallet { // clean candidates. >::kill(); - Self::deposit_event(Event::NewTerm(new_members_sorted_by_id)); + Self::deposit_event(Event::NewTerm { new_members: new_members_sorted_by_id }); >::mutate(|v| *v += 1); }) .map_err(|e| { @@ -2147,10 +2156,9 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![ - (4, 40), - (5, 50), - ]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm { + new_members: vec![(4, 40), (5, 50)], + })); assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); assert_eq!(runners_up_and_stake(), vec![]); @@ -2161,7 +2169,9 @@ mod tests { System::set_block_number(10); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm { + new_members: vec![], + })); // outgoing have lost their bond. assert_eq!(balances(&4), (37, 0)); @@ -2231,7 +2241,9 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); assert!(members_ids().is_empty()); - System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm { + new_members: vec![], + })); }); } @@ -2583,10 +2595,9 @@ mod tests { // 5 is an outgoing loser. will also get slashed. assert_eq!(balances(&5), (45, 2)); - System::assert_has_event(Event::Elections(super::Event::NewTerm(vec![ - (4, 40), - (5, 50), - ]))); + System::assert_has_event(Event::Elections(super::Event::NewTerm { + new_members: vec![(4, 40), (5, 50)], + })); }) } diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index ac13bce31b0f6..7ca11f4ed20e8 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -465,15 +465,14 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Reaped \[voter, reaper\]. - VoterReaped(T::AccountId, T::AccountId), - /// Slashed \[reaper\]. - BadReaperSlashed(T::AccountId), - /// A tally (for approval votes of \[seats\]) has started. - TallyStarted(u32), + /// Reaped + VoterReaped { voter: T::AccountId, reaper: T::AccountId }, + /// Slashed + BadReaperSlashed { reaper: T::AccountId }, + /// A tally (for approval votes of seats) has started. + TallyStarted { seats: u32 }, /// A tally (for approval votes of seat(s)) has ended (with one or more new members). 
- /// \[incoming, outgoing\] - TallyFinalized(Vec, Vec), + TallyFinalized { incoming: Vec, outgoing: Vec }, } #[pallet::call] @@ -590,11 +589,11 @@ pub mod pallet { T::VotingBond::get(), BalanceStatus::Free, )?; - Self::deposit_event(Event::::VoterReaped(who, reporter)); + Self::deposit_event(Event::::VoterReaped { voter: who, reaper: reporter }); } else { let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; T::BadReaper::on_unbalanced(imbalance); - Self::deposit_event(Event::::BadReaperSlashed(reporter)); + Self::deposit_event(Event::::BadReaperSlashed { reaper: reporter }); } Ok(()) } @@ -1024,7 +1023,7 @@ impl Pallet { leaderboard_size ]); - Self::deposit_event(Event::::TallyStarted(empty_seats as u32)); + Self::deposit_event(Event::::TallyStarted { seats: empty_seats as u32 }); } } @@ -1118,7 +1117,7 @@ impl Pallet { new_candidates.truncate(last_index + 1); } - Self::deposit_event(Event::::TallyFinalized(incoming, outgoing)); + Self::deposit_event(Event::::TallyFinalized { incoming, outgoing }); >::put(new_candidates); CandidateCount::::put(count); diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index b172acb66d324..25ff76f4d1514 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -525,7 +525,7 @@ pub mod pallet { }); // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(Event::AccumulateDummy(increase_by)); + Self::deposit_event(Event::AccumulateDummy { balance: increase_by }); // All good, no refund. Ok(()) @@ -557,7 +557,7 @@ pub mod pallet { // Put the new value into storage. >::put(new_value); - Self::deposit_event(Event::SetDummy(new_value)); + Self::deposit_event(Event::SetDummy { balance: new_value }); // All good, no refund. Ok(()) @@ -574,9 +574,16 @@ pub mod pallet { pub enum Event { // Just a normal `enum`, here's a dummy event to ensure it compiles. /// Dummy event, just here so there's a generic type that's used. - AccumulateDummy(BalanceOf), - SetDummy(BalanceOf), - SetBar(T::AccountId, BalanceOf), + AccumulateDummy { + balance: BalanceOf, + }, + SetDummy { + balance: BalanceOf, + }, + SetBar { + account: T::AccountId, + balance: BalanceOf, + }, } // pallet::storage attributes allow for type-safe usage of the Substrate storage database, diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index e5f2e00d9a344..9812d35ffa074 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -291,8 +291,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Event generated when new price is accepted to contribute to the average. - /// \[price, who\] - NewPrice(u32, T::AccountId), + NewPrice { price: u32, who: T::AccountId }, } #[pallet::validate_unsigned] @@ -658,7 +657,7 @@ impl Pallet { .expect("The average is not empty, because it was just mutated; qed"); log::info!("Current average price is: {}", average); // here we are raising the NewPrice event - Self::deposit_event(Event::NewPrice(price, who)); + Self::deposit_event(Event::NewPrice { price, who }); } /// Calculate current average price. diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 1594601b457cb..b494eed67445c 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -272,17 +272,23 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A bid was successfully placed. 
- /// \[ who, amount, duration \] - BidPlaced(T::AccountId, BalanceOf, u32), + BidPlaced { who: T::AccountId, amount: BalanceOf, duration: u32 }, /// A bid was successfully removed (before being accepted as a gilt). - /// \[ who, amount, duration \] - BidRetracted(T::AccountId, BalanceOf, u32), + BidRetracted { who: T::AccountId, amount: BalanceOf, duration: u32 }, /// A bid was accepted as a gilt. The balance may not be released until expiry. - /// \[ index, expiry, who, amount \] - GiltIssued(ActiveIndex, T::BlockNumber, T::AccountId, BalanceOf), + GiltIssued { + index: ActiveIndex, + expiry: T::BlockNumber, + who: T::AccountId, + amount: BalanceOf, + }, /// An expired gilt has been thawed. - /// \[ index, who, original_amount, additional_amount \] - GiltThawed(ActiveIndex, T::AccountId, BalanceOf, BalanceOf), + GiltThawed { + index: ActiveIndex, + who: T::AccountId, + original_amount: BalanceOf, + additional_amount: BalanceOf, + }, } #[pallet::error] @@ -376,7 +382,7 @@ pub mod pallet { qs[queue_index].0 += net.0; qs[queue_index].1 = qs[queue_index].1.saturating_add(net.1); }); - Self::deposit_event(Event::BidPlaced(who.clone(), amount, duration)); + Self::deposit_event(Event::BidPlaced { who: who.clone(), amount, duration }); Ok(().into()) } @@ -414,7 +420,7 @@ pub mod pallet { }); T::Currency::unreserve(&bid.who, bid.amount); - Self::deposit_event(Event::BidRetracted(bid.who, bid.amount, duration)); + Self::deposit_event(Event::BidRetracted { who: bid.who, amount: bid.amount, duration }); Ok(().into()) } @@ -493,7 +499,12 @@ pub mod pallet { debug_assert!(err_amt.is_zero()); } - let e = Event::GiltThawed(index, gilt.who, gilt.amount, gilt_value); + let e = Event::GiltThawed { + index, + who: gilt.who, + original_amount: gilt.amount, + additional_amount: gilt_value, + }; Self::deposit_event(e); }); @@ -603,7 +614,8 @@ pub mod pallet { totals.frozen += bid.amount; totals.proportion = totals.proportion.saturating_add(proportion); totals.index += 1; - let e = Event::GiltIssued(index, expiry, who.clone(), amount); + let e = + Event::GiltIssued { index, expiry, who: who.clone(), amount }; Self::deposit_event(e); let gilt = ActiveGilt { amount, proportion, who, expiry }; Active::::insert(index, gilt); diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 0e7d885649cc3..b289da464a93f 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -154,9 +154,9 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event(Event::NewAuthorities( - pending_change.next_authorities.to_vec(), - )); + Self::deposit_event(Event::NewAuthorities { + authority_set: pending_change.next_authorities.to_vec(), + }); >::kill(); } } @@ -255,8 +255,8 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(fn deposit_event)] pub enum Event { - /// New authority set has been applied. \[authority_set\] - NewAuthorities(AuthorityList), + /// New authority set has been applied. + NewAuthorities { authority_set: AuthorityList }, /// Current authority set has been paused. Paused, /// Current authority set has been resumed. 
diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 98f54f966fadc..6dc0a26da8bd3 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -57,7 +57,10 @@ fn authorities_change_logged() { System::events(), vec![EventRecord { phase: Phase::Finalization, - event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), + event: Event::NewAuthorities { + authority_set: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + } + .into(), topics: vec![], },] ); @@ -93,7 +96,10 @@ fn authorities_change_logged_after_delay() { System::events(), vec![EventRecord { phase: Phase::Finalization, - event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), + event: Event::NewAuthorities { + authority_set: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + } + .into(), topics: vec![], },] ); diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 68869a43992f9..db257fec43a13 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -153,7 +153,7 @@ benchmarks! { }; }: _(RawOrigin::Signed(caller.clone()), Box::new(create_identity_info::(x))) verify { - assert_last_event::(Event::::IdentitySet(caller).into()); + assert_last_event::(Event::::IdentitySet { who: caller }.into()); } // We need to split `set_subs` into two benchmarks to accurately isolate the potential @@ -237,7 +237,7 @@ benchmarks! { }; }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) verify { - assert_last_event::(Event::::JudgementRequested(caller, r-1).into()); + assert_last_event::(Event::::JudgementRequested { who: caller, registrar_index: r-1 }.into()); } cancel_request { @@ -257,7 +257,7 @@ benchmarks! { Identity::::request_judgement(caller_origin, r - 1, 10u32.into())?; }: _(RawOrigin::Signed(caller.clone()), r - 1) verify { - assert_last_event::(Event::::JudgementUnrequested(caller, r-1).into()); + assert_last_event::(Event::::JudgementUnrequested { who: caller, registrar_index: r-1 }.into()); } set_fee { @@ -328,7 +328,7 @@ benchmarks! { Identity::::request_judgement(user_origin.clone(), r, 10u32.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { - assert_last_event::(Event::::JudgementGiven(user, r).into()) + assert_last_event::(Event::::JudgementGiven { target: user, registrar_index: r }.into()) } kill_identity { diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 4d86efd27e534..b4b52331618cb 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -241,28 +241,27 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A name was set or reset (which will remove all judgements). \[who\] - IdentitySet(T::AccountId), - /// A name was cleared, and the given balance returned. \[who, deposit\] - IdentityCleared(T::AccountId, BalanceOf), - /// A name was removed and the given balance slashed. \[who, deposit\] - IdentityKilled(T::AccountId, BalanceOf), - /// A judgement was asked from a registrar. \[who, registrar_index\] - JudgementRequested(T::AccountId, RegistrarIndex), - /// A judgement request was retracted. \[who, registrar_index\] - JudgementUnrequested(T::AccountId, RegistrarIndex), - /// A judgement was given by a registrar. \[target, registrar_index\] - JudgementGiven(T::AccountId, RegistrarIndex), - /// A registrar was added. 
\[registrar_index\] - RegistrarAdded(RegistrarIndex), - /// A sub-identity was added to an identity and the deposit paid. \[sub, main, deposit\] - SubIdentityAdded(T::AccountId, T::AccountId, BalanceOf), + /// A name was set or reset (which will remove all judgements). + IdentitySet { who: T::AccountId }, + /// A name was cleared, and the given balance returned. + IdentityCleared { who: T::AccountId, deposit: BalanceOf }, + /// A name was removed and the given balance slashed. + IdentityKilled { who: T::AccountId, deposit: BalanceOf }, + /// A judgement was asked from a registrar. + JudgementRequested { who: T::AccountId, registrar_index: RegistrarIndex }, + /// A judgement request was retracted. + JudgementUnrequested { who: T::AccountId, registrar_index: RegistrarIndex }, + /// A judgement was given by a registrar. + JudgementGiven { target: T::AccountId, registrar_index: RegistrarIndex }, + /// A registrar was added. + RegistrarAdded { registrar_index: RegistrarIndex }, + /// A sub-identity was added to an identity and the deposit paid. + SubIdentityAdded { sub: T::AccountId, main: T::AccountId, deposit: BalanceOf }, /// A sub-identity was removed from an identity and the deposit freed. - /// \[sub, main, deposit\] - SubIdentityRemoved(T::AccountId, T::AccountId, BalanceOf), + SubIdentityRemoved { sub: T::AccountId, main: T::AccountId, deposit: BalanceOf }, /// A sub-identity was cleared, and the given deposit repatriated from the - /// main identity account to the sub-identity account. \[sub, main, deposit\] - SubIdentityRevoked(T::AccountId, T::AccountId, BalanceOf), + /// main identity account to the sub-identity account. + SubIdentityRevoked { sub: T::AccountId, main: T::AccountId, deposit: BalanceOf }, } #[pallet::call] @@ -301,7 +300,7 @@ pub mod pallet { }, )?; - Self::deposit_event(Event::RegistrarAdded(i)); + Self::deposit_event(Event::RegistrarAdded { registrar_index: i }); Ok(Some(T::WeightInfo::add_registrar(registrar_count as u32)).into()) } @@ -364,7 +363,7 @@ pub mod pallet { let judgements = id.judgements.len(); >::insert(&sender, id); - Self::deposit_event(Event::IdentitySet(sender)); + Self::deposit_event(Event::IdentitySet { who: sender }); Ok(Some(T::WeightInfo::set_identity( judgements as u32, // R @@ -489,7 +488,7 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&sender, deposit.clone()); debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::IdentityCleared(sender, deposit)); + Self::deposit_event(Event::IdentityCleared { who: sender, deposit }); Ok(Some(T::WeightInfo::clear_identity( id.judgements.len() as u32, // R @@ -558,7 +557,10 @@ pub mod pallet { let extra_fields = id.info.additional.len(); >::insert(&sender, id); - Self::deposit_event(Event::JudgementRequested(sender, reg_index)); + Self::deposit_event(Event::JudgementRequested { + who: sender, + registrar_index: reg_index, + }); Ok(Some(T::WeightInfo::request_judgement(judgements as u32, extra_fields as u32)) .into()) @@ -608,7 +610,10 @@ pub mod pallet { let extra_fields = id.info.additional.len(); >::insert(&sender, id); - Self::deposit_event(Event::JudgementUnrequested(sender, reg_index)); + Self::deposit_event(Event::JudgementUnrequested { + who: sender, + registrar_index: reg_index, + }); Ok(Some(T::WeightInfo::cancel_request(judgements as u32, extra_fields as u32)).into()) } @@ -791,7 +796,7 @@ pub mod pallet { let judgements = id.judgements.len(); let extra_fields = id.info.additional.len(); >::insert(&target, id); - Self::deposit_event(Event::JudgementGiven(target, 
reg_index)); + Self::deposit_event(Event::JudgementGiven { target, registrar_index: reg_index }); Ok(Some(T::WeightInfo::provide_judgement(judgements as u32, extra_fields as u32)) .into()) @@ -839,7 +844,7 @@ pub mod pallet { // Slash their deposit from them. T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0); - Self::deposit_event(Event::IdentityKilled(target, deposit)); + Self::deposit_event(Event::IdentityKilled { who: target, deposit }); Ok(Some(T::WeightInfo::kill_identity( id.judgements.len() as u32, // R @@ -882,7 +887,7 @@ pub mod pallet { sub_ids.try_push(sub.clone()).expect("sub ids length checked above; qed"); *subs_deposit = subs_deposit.saturating_add(deposit); - Self::deposit_event(Event::SubIdentityAdded(sub, sender.clone(), deposit)); + Self::deposit_event(Event::SubIdentityAdded { sub, main: sender.clone(), deposit }); Ok(()) }) } @@ -929,7 +934,7 @@ pub mod pallet { *subs_deposit -= deposit; let err_amount = T::Currency::unreserve(&sender, deposit); debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::SubIdentityRemoved(sub, sender, deposit)); + Self::deposit_event(Event::SubIdentityRemoved { sub, main: sender, deposit }); }); Ok(()) } @@ -954,7 +959,11 @@ pub mod pallet { *subs_deposit -= deposit; let _ = T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free); - Self::deposit_event(Event::SubIdentityRevoked(sender, sup.clone(), deposit)); + Self::deposit_event(Event::SubIdentityRevoked { + sub: sender, + main: sup.clone(), + deposit, + }); }); Ok(()) } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 2c5a7633c3b4a..718c735fdad41 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -374,12 +374,12 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A new heartbeat was received from `AuthorityId` \[authority_id\] - HeartbeatReceived(T::AuthorityId), + /// A new heartbeat was received from `AuthorityId`. + HeartbeatReceived { authority_id: T::AuthorityId }, /// At the end of the session, no offence was committed. AllGood, - /// At the end of the session, at least one validator was found to be \[offline\]. - SomeOffline(Vec>), + /// At the end of the session, at least one validator was found to be offline. 
+ SomeOffline { offline: Vec<IdentificationTuple<T>> },
 }

 #[pallet::error]
@@ -495,7 +495,7 @@ pub mod pallet {
 let keys = Keys::<T>::get();
 let public = keys.get(heartbeat.authority_index as usize);
 if let (false, Some(public)) = (exists, public) {
- Self::deposit_event(Event::<T>::HeartbeatReceived(public.clone()));
+ Self::deposit_event(Event::<T>::HeartbeatReceived { authority_id: public.clone() });

 let network_state_bounded = BoundedOpaqueNetworkState::<
 T::MaxPeerDataEncodingSize,
@@ -908,7 +908,7 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
 if offenders.is_empty() {
 Self::deposit_event(Event::<T>::AllGood);
 } else {
- Self::deposit_event(Event::<T>::SomeOffline(offenders.clone()));
+ Self::deposit_event(Event::<T>::SomeOffline { offline: offenders.clone() });

 let validator_set_count = keys.len() as u32;
 let offence = UnresponsivenessOffence { session_index, validator_set_count, offenders };
diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs
index 0901a89d41ad6..d8051bac0a3cd 100644
--- a/frame/indices/src/lib.rs
+++ b/frame/indices/src/lib.rs
@@ -105,7 +105,7 @@ pub mod pallet {
 *maybe_value = Some((who.clone(), T::Deposit::get(), false));
 T::Currency::reserve(&who, T::Deposit::get())
 })?;
- Self::deposit_event(Event::IndexAssigned(who, index));
+ Self::deposit_event(Event::IndexAssigned { who, index });
 Ok(())
 }

@@ -146,7 +146,7 @@ pub mod pallet {
 *maybe_value = Some((new.clone(), amount.saturating_sub(lost), false));
 Ok(())
 })?;
- Self::deposit_event(Event::IndexAssigned(new, index));
+ Self::deposit_event(Event::IndexAssigned { who: new, index });
 Ok(())
 }

@@ -179,7 +179,7 @@ pub mod pallet {
 T::Currency::unreserve(&who, amount);
 Ok(())
 })?;
- Self::deposit_event(Event::IndexFreed(index));
+ Self::deposit_event(Event::IndexFreed { index });
 Ok(())
 }

@@ -219,7 +219,7 @@ pub mod pallet {
 }
 *maybe_value = Some((new.clone(), Zero::zero(), freeze));
 });
- Self::deposit_event(Event::IndexAssigned(new, index));
+ Self::deposit_event(Event::IndexAssigned { who: new, index });
 Ok(())
 }

@@ -253,7 +253,7 @@ pub mod pallet {
 *maybe_value = Some((account, Zero::zero(), true));
 Ok(())
 })?;
- Self::deposit_event(Event::IndexFrozen(index, who));
+ Self::deposit_event(Event::IndexFrozen { index, who });
 Ok(())
 }
 }
@@ -261,12 +261,12 @@ pub mod pallet {
 #[pallet::event]
 #[pallet::generate_deposit(pub(super) fn deposit_event)]
 pub enum Event<T: Config> {
- /// A account index was assigned. \[index, who\]
- IndexAssigned(T::AccountId, T::AccountIndex),
- /// A account index has been freed up (unassigned). \[index\]
- IndexFreed(T::AccountIndex),
- /// A account index has been frozen to its current account ID. \[index, who\]
- IndexFrozen(T::AccountIndex, T::AccountId),
+ /// An account index was assigned.
+ IndexAssigned { who: T::AccountId, index: T::AccountIndex },
+ /// An account index has been freed up (unassigned).
+ IndexFreed { index: T::AccountIndex },
+ /// An account index has been frozen to its current account ID.
+ IndexFrozen { index: T::AccountIndex, who: T::AccountId },
 }

 /// Old name generated by `decl_event`.
diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs
index 260b4c2d76ae9..c1c536b8ba290 100644
--- a/frame/lottery/src/lib.rs
+++ b/frame/lottery/src/lib.rs
@@ -176,9 +176,9 @@ pub mod pallet {
 /// A new set of calls have been set!
 CallsUpdated,
 /// A winner has been chosen!
- Winner(T::AccountId, BalanceOf<T>),
+ Winner { winner: T::AccountId, lottery_balance: BalanceOf<T> },
 /// A ticket has been bought!
- TicketBought(T::AccountId, CallIndex), + TicketBought { who: T::AccountId, call_index: CallIndex }, } #[pallet::error] @@ -250,7 +250,7 @@ pub mod pallet { ); debug_assert!(res.is_ok()); - Self::deposit_event(Event::::Winner(winner, lottery_balance)); + Self::deposit_event(Event::::Winner { winner, lottery_balance }); TicketsCount::::kill(); @@ -452,7 +452,7 @@ impl Pallet { }, )?; - Self::deposit_event(Event::::TicketBought(caller.clone(), call_index)); + Self::deposit_event(Event::::TicketBought { who: caller.clone(), call_index }); Ok(()) } diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 8fa2abb0ad3f3..6cd8c13f39aff 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -143,7 +143,7 @@ pub mod pallet { /// One of the members' keys changed. KeyChanged, /// Phantom member, never used. - Dummy(PhantomData<(T::AccountId, >::Event)>), + Dummy { _phantom_data: PhantomData<(T::AccountId, >::Event)> }, } /// Old name generated by `decl_event`. diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index c38ddf1793ee1..757a99b42dae8 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -205,21 +205,30 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A new multisig operation has begun. \[approving, multisig, call_hash\] - NewMultisig(T::AccountId, T::AccountId, CallHash), + /// A new multisig operation has begun. + NewMultisig { approving: T::AccountId, multisig: T::AccountId, call_hash: CallHash }, /// A multisig operation has been approved by someone. - /// \[approving, timepoint, multisig, call_hash\] - MultisigApproval(T::AccountId, Timepoint, T::AccountId, CallHash), - /// A multisig operation has been executed. \[approving, timepoint, multisig, call_hash\] - MultisigExecuted( - T::AccountId, - Timepoint, - T::AccountId, - CallHash, - DispatchResult, - ), - /// A multisig operation has been cancelled. \[canceling, timepoint, multisig, call_hash\] - MultisigCancelled(T::AccountId, Timepoint, T::AccountId, CallHash), + MultisigApproval { + approving: T::AccountId, + timepoint: Timepoint, + multisig: T::AccountId, + call_hash: CallHash, + }, + /// A multisig operation has been executed. + MultisigExecuted { + approving: T::AccountId, + timepoint: Timepoint, + multisig: T::AccountId, + call_hash: CallHash, + result: DispatchResult, + }, + /// A multisig operation has been cancelled. + MultisigCancelled { + cancelling: T::AccountId, + timepoint: Timepoint, + multisig: T::AccountId, + call_hash: CallHash, + }, } #[pallet::hooks] @@ -481,7 +490,12 @@ pub mod pallet { >::remove(&id, &call_hash); Self::clear_call(&call_hash); - Self::deposit_event(Event::MultisigCancelled(who, timepoint, id, call_hash)); + Self::deposit_event(Event::MultisigCancelled { + cancelling: who, + timepoint, + multisig: id, + call_hash, + }); Ok(()) } } @@ -557,13 +571,13 @@ impl Pallet { T::Currency::unreserve(&m.depositor, m.deposit); let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); - Self::deposit_event(Event::MultisigExecuted( - who, + Self::deposit_event(Event::MultisigExecuted { + approving: who, timepoint, - id, + multisig: id, call_hash, - result.map(|_| ()).map_err(|e| e.error), - )); + result: result.map(|_| ()).map_err(|e| e.error), + }); Ok(get_result_weight(result) .map(|actual_weight| { T::WeightInfo::as_multi_complete( @@ -594,7 +608,12 @@ impl Pallet { // Record approval. 
m.approvals.insert(pos, who.clone()); >::insert(&id, call_hash, m); - Self::deposit_event(Event::MultisigApproval(who, timepoint, id, call_hash)); + Self::deposit_event(Event::MultisigApproval { + approving: who, + timepoint, + multisig: id, + call_hash, + }); } else { // If we already approved and didn't store the Call, then this was useless and // we report an error. @@ -638,7 +657,7 @@ impl Pallet { approvals: vec![who.clone()], }, ); - Self::deposit_event(Event::NewMultisig(who, id, call_hash)); + Self::deposit_event(Event::NewMultisig { approving: who, multisig: id, call_hash }); let final_weight = if stored { T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index c5607c80abce4..523aefd1e753c 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -706,7 +706,14 @@ fn multisig_2_of_3_cannot_reissue_same_call() { let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); System::assert_last_event( - pallet_multisig::Event::MultisigExecuted(3, now(), multi, hash, Err(err)).into(), + pallet_multisig::Event::MultisigExecuted { + approving: 3, + timepoint: now(), + multisig: multi, + call_hash: hash, + result: Err(err), + } + .into(), ); }); } diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index f502a683f633c..438929576269c 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -89,16 +89,16 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A name was set. \[who\] - NameSet(T::AccountId), - /// A name was forcibly set. \[target\] - NameForced(T::AccountId), - /// A name was changed. \[who\] - NameChanged(T::AccountId), - /// A name was cleared, and the given balance returned. \[who, deposit\] - NameCleared(T::AccountId, BalanceOf), - /// A name was removed and the given balance slashed. \[target, deposit\] - NameKilled(T::AccountId, BalanceOf), + /// A name was set. + NameSet { who: T::AccountId }, + /// A name was forcibly set. + NameForced { target: T::AccountId }, + /// A name was changed. + NameChanged { who: T::AccountId }, + /// A name was cleared, and the given balance returned. + NameCleared { who: T::AccountId, deposit: BalanceOf }, + /// A name was removed and the given balance slashed. + NameKilled { target: T::AccountId, deposit: BalanceOf }, } /// Error for the nicks pallet. @@ -147,12 +147,12 @@ pub mod pallet { ensure!(name.len() <= T::MaxLength::get() as usize, Error::::TooLong); let deposit = if let Some((_, deposit)) = >::get(&sender) { - Self::deposit_event(Event::::NameChanged(sender.clone())); + Self::deposit_event(Event::::NameChanged { who: sender.clone() }); deposit } else { let deposit = T::ReservationFee::get(); T::Currency::reserve(&sender, deposit.clone())?; - Self::deposit_event(Event::::NameSet(sender.clone())); + Self::deposit_event(Event::::NameSet { who: sender.clone() }); deposit }; @@ -179,7 +179,7 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&sender, deposit.clone()); debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::::NameCleared(sender, deposit)); + Self::deposit_event(Event::::NameCleared { who: sender, deposit }); Ok(()) } @@ -210,7 +210,7 @@ pub mod pallet { // Slash their deposit from them. 
T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit.clone()).0); - Self::deposit_event(Event::::NameKilled(target, deposit)); + Self::deposit_event(Event::::NameKilled { target, deposit }); Ok(()) } @@ -238,7 +238,7 @@ pub mod pallet { let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); >::insert(&target, (name, deposit)); - Self::deposit_event(Event::::NameForced(target)); + Self::deposit_event(Event::::NameForced { target }); Ok(()) } } diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 016f12d2eb838..6e3ec58ba63f9 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -128,24 +128,24 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// The given well known node was added. - NodeAdded(PeerId, T::AccountId), + NodeAdded { peer_id: PeerId, who: T::AccountId }, /// The given well known node was removed. - NodeRemoved(PeerId), + NodeRemoved { peer_id: PeerId }, /// The given well known node was swapped; first item was removed, /// the latter was added. - NodeSwapped(PeerId, PeerId), + NodeSwapped { removed: PeerId, added: PeerId }, /// The given well known nodes were reset. - NodesReset(Vec<(PeerId, T::AccountId)>), + NodesReset { nodes: Vec<(PeerId, T::AccountId)> }, /// The given node was claimed by a user. - NodeClaimed(PeerId, T::AccountId), + NodeClaimed { peer_id: PeerId, who: T::AccountId }, /// The given claim was removed by its owner. - ClaimRemoved(PeerId, T::AccountId), + ClaimRemoved { peer_id: PeerId, who: T::AccountId }, /// The node was transferred to another account. - NodeTransferred(PeerId, T::AccountId), + NodeTransferred { peer_id: PeerId, target: T::AccountId }, /// The allowed connections were added to a node. - ConnectionsAdded(PeerId, Vec), + ConnectionsAdded { peer_id: PeerId, allowed_connections: Vec }, /// The allowed connections were removed from a node. 
- ConnectionsRemoved(PeerId, Vec), + ConnectionsRemoved { peer_id: PeerId, allowed_connections: Vec }, } #[pallet::error] @@ -224,7 +224,7 @@ pub mod pallet { WellKnownNodes::::put(&nodes); >::insert(&node, &owner); - Self::deposit_event(Event::NodeAdded(node, owner)); + Self::deposit_event(Event::NodeAdded { peer_id: node, who: owner }); Ok(()) } @@ -248,7 +248,7 @@ pub mod pallet { >::remove(&node); AdditionalConnections::::remove(&node); - Self::deposit_event(Event::NodeRemoved(node)); + Self::deposit_event(Event::NodeRemoved { peer_id: node }); Ok(()) } @@ -284,7 +284,7 @@ pub mod pallet { Owners::::swap(&remove, &add); AdditionalConnections::::swap(&remove, &add); - Self::deposit_event(Event::NodeSwapped(remove, add)); + Self::deposit_event(Event::NodeSwapped { removed: remove, added: add }); Ok(()) } @@ -305,7 +305,7 @@ pub mod pallet { Self::initialize_nodes(&nodes); - Self::deposit_event(Event::NodesReset(nodes)); + Self::deposit_event(Event::NodesReset { nodes }); Ok(()) } @@ -321,7 +321,7 @@ pub mod pallet { ensure!(!Owners::::contains_key(&node), Error::::AlreadyClaimed); Owners::::insert(&node, &sender); - Self::deposit_event(Event::NodeClaimed(node, sender)); + Self::deposit_event(Event::NodeClaimed { peer_id: node, who: sender }); Ok(()) } @@ -342,7 +342,7 @@ pub mod pallet { Owners::::remove(&node); AdditionalConnections::::remove(&node); - Self::deposit_event(Event::ClaimRemoved(node, sender)); + Self::deposit_event(Event::ClaimRemoved { peer_id: node, who: sender }); Ok(()) } @@ -364,7 +364,7 @@ pub mod pallet { Owners::::insert(&node, &owner); - Self::deposit_event(Event::NodeTransferred(node, owner)); + Self::deposit_event(Event::NodeTransferred { peer_id: node, target: owner }); Ok(()) } @@ -395,7 +395,10 @@ pub mod pallet { AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(Event::ConnectionsAdded(node, connections)); + Self::deposit_event(Event::ConnectionsAdded { + peer_id: node, + allowed_connections: connections, + }); Ok(()) } @@ -423,7 +426,10 @@ pub mod pallet { AdditionalConnections::::insert(&node, nodes); - Self::deposit_event(Event::ConnectionsRemoved(node, connections)); + Self::deposit_event(Event::ConnectionsRemoved { + peer_id: node, + allowed_connections: connections, + }); Ok(()) } } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index c920b0b900dff..33ebe23d8d1fd 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -315,13 +315,13 @@ benchmarks! { ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) ); let balance_slash = |id| core::iter::once( - ::Event::from(pallet_balances::Event::::Slashed(id, slash_amount.into())) + ::Event::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) ); let chill = |id| core::iter::once( ::Event::from(StakingEvent::::Chilled(id)) ); let balance_deposit = |id, amount: u32| - ::Event::from(pallet_balances::Event::::Deposit(id, amount.into())); + ::Event::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); let mut first = true; let slash_events = raw_offenders.into_iter() .flat_map(|offender| { @@ -344,7 +344,7 @@ benchmarks! 
{ balance_deposit(reporter.clone(), reward.into()).into(), frame_system::Event::::NewAccount(reporter.clone()).into(), ::Event::from( - pallet_balances::Event::::Endowed(reporter.clone(), reward.into()) + pallet_balances::Event::::Endowed{account: reporter.clone(), free_balance: reward.into()} ).into(), ]) .collect::>(); @@ -371,10 +371,10 @@ benchmarks! { std::iter::empty() .chain(slash_events.into_iter().map(Into::into)) .chain(std::iter::once(::Event::from( - pallet_offences::Event::Offence( - UnresponsivenessOffence::::ID, - 0_u32.to_le_bytes().to_vec(), - ) + pallet_offences::Event::Offence{ + kind: UnresponsivenessOffence::::ID, + timeslot: 0_u32.to_le_bytes().to_vec(), + } ).into())) ); } diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index d50bc55f88357..ddae73e280d57 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -108,7 +108,7 @@ pub mod pallet { /// There is an offence reported of the given `kind` happened at the `session_index` and /// (kind-specific) time slot. This event is not deposited for duplicate slashes. /// \[kind, timeslot\]. - Offence(Kind, OpaqueTimeSlot), + Offence { kind: Kind, timeslot: OpaqueTimeSlot }, } #[pallet::hooks] @@ -153,7 +153,7 @@ where ); // Deposit the event. - Self::deposit_event(Event::Offence(O::ID, time_slot.encode())); + Self::deposit_event(Event::Offence { kind: O::ID, timeslot: time_slot.encode() }); Ok(()) } diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 18cfa9410a6c6..8c4fdcc08f995 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -114,7 +114,10 @@ fn should_deposit_event() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), + event: Event::Offences(crate::Event::Offence { + kind: KIND, + timeslot: time_slot.encode() + }), topics: vec![], }] ); @@ -145,7 +148,10 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Offences(crate::Event::Offence(KIND, time_slot.encode())), + event: Event::Offences(crate::Event::Offence { + kind: KIND, + timeslot: time_slot.encode() + }), topics: vec![], }] ); diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 1eb3ec5770544..224610b65185b 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -86,7 +86,7 @@ benchmarks! { let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(Event::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } proxy_announced { @@ -107,7 +107,7 @@ benchmarks! { add_announcements::(a, Some(delegate.clone()), None)?; }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) verify { - assert_last_event::(Event::ProxyExecuted(Ok(())).into()) + assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } remove_announcement { @@ -165,7 +165,7 @@ benchmarks! { let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) verify { - assert_last_event::(Event::Announced(real, caller, call_hash).into()); + assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); } add_proxy { @@ -216,12 +216,12 @@ benchmarks! 
{ ) verify { let anon_account = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::AnonymousCreated( - anon_account, - caller, - T::ProxyType::default(), - 0, - ).into()); + assert_last_event::(Event::AnonymousCreated { + anonymous: anon_account, + who: caller, + proxy_type: T::ProxyType::default(), + disambiguation_index: 0, + }.into()); } kill_anonymous { diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index b73101fa73486..695fa077f98d5 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -327,7 +327,12 @@ pub mod pallet { T::Currency::reserve(&who, deposit)?; Proxies::::insert(&anonymous, (bounded_proxies, deposit)); - Self::deposit_event(Event::AnonymousCreated(anonymous, who, proxy_type, index)); + Self::deposit_event(Event::AnonymousCreated { + anonymous, + who, + proxy_type, + disambiguation_index: index, + }); Ok(()) } @@ -427,7 +432,7 @@ pub mod pallet { }) .map(|d| *deposit = d) })?; - Self::deposit_event(Event::Announced(real, who, call_hash)); + Self::deposit_event(Event::Announced { real, proxy: who, call_hash }); Ok(()) } @@ -547,16 +552,25 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A proxy was executed correctly, with the given \[result\]. - ProxyExecuted(DispatchResult), + /// A proxy was executed correctly, with the given. + ProxyExecuted { result: DispatchResult }, /// Anonymous account has been created by new proxy with given - /// disambiguation index and proxy type. \[anonymous, who, proxy_type, - /// disambiguation_index\] - AnonymousCreated(T::AccountId, T::AccountId, T::ProxyType, u16), - /// An announcement was placed to make a call in the future. \[real, proxy, call_hash\] - Announced(T::AccountId, T::AccountId, CallHashOf), - /// A proxy was added. \[delegator, delegatee, proxy_type, delay\] - ProxyAdded(T::AccountId, T::AccountId, T::ProxyType, T::BlockNumber), + /// disambiguation index and proxy type. + AnonymousCreated { + anonymous: T::AccountId, + who: T::AccountId, + proxy_type: T::ProxyType, + disambiguation_index: u16, + }, + /// An announcement was placed to make a call in the future. + Announced { real: T::AccountId, proxy: T::AccountId, call_hash: CallHashOf }, + /// A proxy was added. + ProxyAdded { + delegator: T::AccountId, + delegatee: T::AccountId, + proxy_type: T::ProxyType, + delay: T::BlockNumber, + }, } /// Old name generated by `decl_event`. 
@@ -672,12 +686,12 @@ impl Pallet { T::Currency::unreserve(delegator, *deposit - new_deposit); } *deposit = new_deposit; - Self::deposit_event(Event::::ProxyAdded( - delegator.clone(), + Self::deposit_event(Event::::ProxyAdded { + delegator: delegator.clone(), delegatee, proxy_type, delay, - )); + }); Ok(()) }) } @@ -800,6 +814,6 @@ impl Pallet { } }); let e = call.dispatch(origin); - Self::deposit_event(Event::ProxyExecuted(e.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::ProxyExecuted { result: e.map(|_| ()).map_err(|e| e.error) }); } } diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index ed21a80f62139..d3565525910fb 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -211,7 +211,15 @@ fn call_transfer(dest: u64, value: u64) -> Call { fn announcement_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); - System::assert_last_event(ProxyEvent::ProxyAdded(1, 3, ProxyType::Any, 1).into()); + System::assert_last_event( + ProxyEvent::ProxyAdded { + delegator: 1, + delegatee: 3, + proxy_type: ProxyType::Any, + delay: 1, + } + .into(), + ); assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); assert_eq!(Balances::reserved_balance(3), 0); @@ -332,12 +340,12 @@ fn filtering_works() { let call = Box::new(call_transfer(6, 1)); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); let derivative_id = Utility::derivative_account_id(1, 0); @@ -347,31 +355,31 @@ fn filtering_works() { let call = Box::new(Call::Utility(UtilityCall::as_derivative { index: 0, call: inner.clone() })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), - ProxyEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: 
Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), - ProxyEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); let inner = @@ -380,32 +388,32 @@ fn filtering_works() { assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), - ProxyEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), - ProxyEvent::ProxyExecuted(Ok(())).into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); let call = Box::new(Call::Proxy(ProxyCall::remove_proxies {})); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ - BalancesEvent::::Unreserved(1, 5).into(), - ProxyEvent::ProxyExecuted(Ok(())).into(), + BalancesEvent::::Unreserved { who: 1, amount: 5 }.into(), + ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); }); } @@ -476,13 +484,13 @@ fn proxying_works() { Error::::NotProxy ); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); let call = Box::new(Call::System(SystemCall::set_code { code: vec![] })); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); let call = @@ -490,10 +498,10 @@ fn proxying_works() { assert_ok!(Call::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) .dispatch(Origin::signed(2))); System::assert_last_event( - ProxyEvent::ProxyExecuted(Err(SystemError::CallFiltered.into())).into(), + ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 2); }); } @@ -504,7 +512,13 @@ fn anonymous_works() { assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( - 
ProxyEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0).into(), + ProxyEvent::AnonymousCreated { + anonymous: anon.clone(), + who: 1, + proxy_type: ProxyType::Any, + disambiguation_index: 0, + } + .into(), ); // other calls to anonymous allowed as long as they're not exactly the same. @@ -525,7 +539,7 @@ fn anonymous_works() { let call = Box::new(call_transfer(6, 1)); assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call)); - System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); let call = Box::new(Call::Proxy(ProxyCall::new_call_variant_kill_anonymous( @@ -537,7 +551,7 @@ fn anonymous_works() { ))); assert_ok!(Proxy::proxy(Origin::signed(2), anon2, None, call.clone())); let de = DispatchError::from(Error::::NoPermission).stripped(); - System::assert_last_event(ProxyEvent::ProxyExecuted(Err(de)).into()); + System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( Proxy::kill_anonymous(Origin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 68088d0e0d777..414c21aa347f0 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -190,7 +190,7 @@ pub(crate) struct SpanRecord { impl SpanRecord { /// The value of stash balance slashed in this span. #[cfg(test)] - pub(crate) fn amount_slashed(&self) -> &Balance { + pub(crate) fn amount(&self) -> &Balance { &self.slashed } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 8e8a7ee636d8d..8f13fd7850803 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -2469,7 +2469,7 @@ fn garbage_collection_after_slashing() { assert_eq!(Balances::free_balance(11), 2000 - 200); assert!(::SlashingSpans::get(&11).is_some()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &200); + assert_eq!(::SpanSlash::get(&(11, 0)).amount(), &200); on_offence_now( &[OffenceDetails { @@ -2496,7 +2496,7 @@ fn garbage_collection_after_slashing() { assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 2)); assert!(::SlashingSpans::get(&11).is_none()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); + assert_eq!(::SpanSlash::get(&(11, 0)).amount(), &0); }) } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 64cd5d5290635..a0d8aeb11706d 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1363,9 +1363,11 @@ mod tests { )); assert_eq!(Balances::free_balance(2), 0); // Transfer Event - System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer( - 2, 3, 80, - ))); + System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer { + from: 2, + to: 3, + amount: 80, + })); // Killed Event System::assert_has_event(Event::System(system::Event::KilledAccount(2))); }); From 07d98e28cd7a79e45f02a982af14fbde929d8a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Tue, 16 Nov 2021 07:52:27 +0100 Subject: [PATCH 102/162] Forward wasmer-sandbox feature to sp-sandbox (#10268) --- Cargo.lock | 1 + bin/node/runtime/Cargo.toml | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 99b2b53a74a4a..d93e70162926f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4758,6 +4758,7 @@ dependencies = [ 
"sp-npos-elections", "sp-offchain", "sp-runtime", + "sp-sandbox", "sp-session", "sp-staking", "sp-std", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index d086b6f12d590..9a481120fd01e 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -41,6 +41,7 @@ sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } sp-npos-elections = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } # frame dependencies frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } @@ -257,3 +258,8 @@ try-runtime = [ # Make contract callable functions marked as __unstable__ available. Do not enable # on live chains as those are subject to change. contracts-unstable-interface = ["pallet-contracts/unstable-interface"] +# Force `sp-sandbox` to call into the host resident executor. One still need to make sure +# that `sc-executor` gets the `wasmer-sandbox` feature which happens automatically when +# specified on the command line. +# Don't use that on a production chain. +wasmer-sandbox = ["sp-sandbox/wasmer-sandbox"] From f846ee0d56e3882cdfa78616d8e9a36885379c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 16 Nov 2021 14:01:35 +0100 Subject: [PATCH 103/162] Cli: Introduce `--detailed-log-output` flag (#10278) * Cli: Introduce `--detailed-log-output` flag If this CLI flag is given, detailed log output will be enabled. This includes the log level, log target ad the thread name. Before this was only enabled when a log level higher than `info` should be logged. * Update client/tracing/src/logging/mod.rs Co-authored-by: David Co-authored-by: David --- client/cli/src/config.rs | 9 ++++- client/cli/src/params/shared_params.rs | 13 +++++++ client/tracing/src/logging/mod.rs | 54 ++++++++++++++++++-------- 3 files changed, 59 insertions(+), 17 deletions(-) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 86eeed5b40237..20e2bf0df5b2e 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -547,6 +547,11 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } + /// Should the detailed log output be enabled. + fn detailed_log_output(&self) -> Result { + Ok(self.shared_params().detailed_log_output()) + } + /// Is log reloading enabled? fn enable_log_reloading(&self) -> Result { Ok(self.shared_params().enable_log_reloading()) @@ -568,7 +573,9 @@ pub trait CliConfiguration: Sized { sp_panic_handler::set(&C::support_url(), &C::impl_version()); let mut logger = LoggerBuilder::new(self.log_filters()?); - logger.with_log_reloading(self.enable_log_reloading()?); + logger + .with_log_reloading(self.enable_log_reloading()?) + .with_detailed_output(self.detailed_log_output()?); if let Some(tracing_targets) = self.tracing_targets()? 
{ let tracing_receiver = self.tracing_receiver()?; diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 58aabb3148dd2..4a6fe0d2743ef 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -49,6 +49,14 @@ pub struct SharedParams { #[structopt(short = "l", long, value_name = "LOG_PATTERN")] pub log: Vec, + /// Enable detailed log output. + /// + /// This includes displaying the log target, log level and thread name. + /// + /// This is automatically enabled when something is logged with any higher level than `info`. + #[structopt(long)] + pub detailed_log_output: bool, + /// Disable log color output. #[structopt(long)] pub disable_log_color: bool, @@ -107,6 +115,11 @@ impl SharedParams { &self.log } + /// Should the detailed log output be enabled. + pub fn detailed_log_output(&self) -> bool { + self.detailed_log_output + } + /// Should the log color output be disabled? pub fn disable_log_color(&self) -> bool { self.disable_log_color diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 49807098d1cf4..521cfca30e29b 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -95,6 +95,7 @@ fn prepare_subscriber( directives: &str, profiling_targets: Option<&str>, force_colors: Option, + detailed_output: bool, builder_hook: impl Fn( SubscriberBuilder, ) -> SubscriberBuilder, @@ -157,19 +158,19 @@ where tracing_log::LogTracer::builder().with_max_level(max_level).init()?; // If we're only logging `INFO` entries then we'll use a simplified logging format. - let simple = match max_level_hint { - Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => true, - _ => false, - }; + let detailed_output = match max_level_hint { + Some(level) if level <= tracing_subscriber::filter::LevelFilter::INFO => false, + _ => true, + } || detailed_output; let enable_color = force_colors.unwrap_or_else(|| atty::is(atty::Stream::Stderr)); - let timer = fast_local_time::FastLocalTime { with_fractional: !simple }; + let timer = fast_local_time::FastLocalTime { with_fractional: detailed_output }; let event_format = EventFormat { timer, - display_target: !simple, - display_level: !simple, - display_thread_name: !simple, + display_target: detailed_output, + display_level: detailed_output, + display_thread_name: detailed_output, enable_color, dup_to_stdout: !atty::is(atty::Stream::Stderr) && atty::is(atty::Stream::Stdout), }; @@ -194,6 +195,7 @@ pub struct LoggerBuilder { profiling: Option<(crate::TracingReceiver, String)>, log_reloading: bool, force_colors: Option, + detailed_output: bool, } impl LoggerBuilder { @@ -204,6 +206,7 @@ impl LoggerBuilder { profiling: None, log_reloading: false, force_colors: None, + detailed_output: false, } } @@ -223,6 +226,17 @@ impl LoggerBuilder { self } + /// Whether detailed log output should be enabled. + /// + /// This includes showing the log target, log level and thread name. + /// + /// This will be automatically enabled when there is a log level enabled that is higher than + /// `info`. + pub fn with_detailed_output(&mut self, detailed: bool) -> &mut Self { + self.detailed_output = detailed; + self + } + /// Force enable/disable colors. 
pub fn with_colors(&mut self, enable: bool) -> &mut Self { self.force_colors = Some(enable); @@ -239,6 +253,7 @@ impl LoggerBuilder { &self.directives, Some(&profiling_targets), self.force_colors, + self.detailed_output, |builder| enable_log_reloading!(builder), )?; let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); @@ -251,6 +266,7 @@ impl LoggerBuilder { &self.directives, Some(&profiling_targets), self.force_colors, + self.detailed_output, |builder| builder, )?; let profiling = crate::ProfilingLayer::new(tracing_receiver, &profiling_targets); @@ -261,19 +277,25 @@ impl LoggerBuilder { } } else { if self.log_reloading { - let subscriber = - prepare_subscriber(&self.directives, None, self.force_colors, |builder| { - enable_log_reloading!(builder) - })?; + let subscriber = prepare_subscriber( + &self.directives, + None, + self.force_colors, + self.detailed_output, + |builder| enable_log_reloading!(builder), + )?; tracing::subscriber::set_global_default(subscriber)?; Ok(()) } else { - let subscriber = - prepare_subscriber(&self.directives, None, self.force_colors, |builder| { - builder - })?; + let subscriber = prepare_subscriber( + &self.directives, + None, + self.force_colors, + self.detailed_output, + |builder| builder, + )?; tracing::subscriber::set_global_default(subscriber)?; From 74c39a23076bfd23328e1f1d1eed2f508a28247d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 16 Nov 2021 14:02:04 +0100 Subject: [PATCH 104/162] Vesting: Fix `post_migration` check (#10280) * Vesting: Fix `post_migration` check As the vesting migration could already have been done, people could already have started to merge schedules. * :facepalm: --- frame/vesting/src/migrations.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/vesting/src/migrations.rs b/frame/vesting/src/migrations.rs index 086257d285ea0..f8024926f34aa 100644 --- a/frame/vesting/src/migrations.rs +++ b/frame/vesting/src/migrations.rs @@ -70,7 +70,7 @@ pub(crate) mod v1 { for (_key, schedules) in Vesting::::iter() { assert!( - schedules.len() == 1, + schedules.len() >= 1, "A bounded vec with incorrect count of items was created." ); From 9fec502b0310de85cdfab15cefe467e86d843233 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Nov 2021 14:03:48 +0100 Subject: [PATCH 105/162] Bump handlebars from 3.5.3 to 4.1.4 (#10273) Bumps [handlebars](https://github.com/sunng87/handlebars-rust) from 3.5.3 to 4.1.4. - [Release notes](https://github.com/sunng87/handlebars-rust/releases) - [Changelog](https://github.com/sunng87/handlebars-rust/blob/master/CHANGELOG.md) - [Commits](https://github.com/sunng87/handlebars-rust/compare/v3.5.3...v4.1.4) --- updated-dependencies: - dependency-name: handlebars dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- utils/frame/benchmarking-cli/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d93e70162926f..f1e8b00c6eb0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2529,9 +2529,9 @@ checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] name = "handlebars" -version = "3.5.3" +version = "4.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d" +checksum = "e1874024f4a29f47d609014caec0b1c866f1c1eb0661a09c9733ecc4757f5f88" dependencies = [ "log 0.4.14", "pest", diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 03520f9cbd5d7..fab2b963239d3 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -28,7 +28,7 @@ codec = { version = "2.0.0", package = "parity-scale-codec" } structopt = "0.3.25" chrono = "0.4" serde = "1.0.126" -handlebars = "3.5.0" +handlebars = "4.1.4" Inflector = "0.11.4" linked-hash-map = "0.5.4" log = "0.4.8" From 9a00c43a1a81217548b3b7853271f07de6a6b858 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 16 Nov 2021 14:17:19 +0000 Subject: [PATCH 106/162] Update mod.rs (#10277) As many people are unbonding, I've had to explain this to a handful of people recently. This improves the error message a bit and puts it in the error description, so that it is shown in the front page of all explorers if `unbond` fails, hopefully making it clear. --- frame/staking/src/pallet/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 197c2eed325a1..ec7e86af958f1 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -608,7 +608,9 @@ pub mod pallet { DuplicateIndex, /// Slash record index out of bounds. InvalidSlashIndex, - /// Can not bond with value less than minimum required. + /// Cannot have a validator or nominator role, with value less than the minimum defined by + /// governance (see `MinValidatorBond` and `MinNominatorBond`). If unbonding is the + /// intention, `chill` first to remove one's role as validator/nominator. InsufficientBond, /// Can not schedule more unlock chunks. 
NoMoreChunks, From cc4bf9134b492d3a11f45c6b1cfa4fdf307e63e2 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Wed, 17 Nov 2021 15:16:28 +0800 Subject: [PATCH 107/162] pallet-session: Migrate the historical part to the new pallet macro (#9878) * Migrate session-historical to the new pallet macro Signed-off-by: koushiro * pallet-session: Migrate the historical part to the new pallet macro Signed-off-by: koushiro * Fix staking test runtime Signed-off-by: koushiro * Update frame/session/src/historical/mod.rs * Update frame/session/src/historical/mod.rs * update migration doc Signed-off-by: koushiro * use hardcoded prefix for migration v1 Signed-off-by: koushiro * cargo +nightly-2021-11-08 fmt Signed-off-by: koushiro Co-authored-by: Guillaume Thiolliere --- frame/session/benchmarking/src/lib.rs | 4 +- frame/session/src/historical/mod.rs | 136 ++++++++-------- frame/session/src/historical/offchain.rs | 4 +- frame/session/src/lib.rs | 1 + frame/session/src/migrations/mod.rs | 24 +++ frame/session/src/migrations/v1.rs | 194 +++++++++++++++++++++++ frame/session/src/tests.rs | 27 ++++ frame/staking/src/mock.rs | 1 + 8 files changed, 325 insertions(+), 66 deletions(-) create mode 100644 frame/session/src/migrations/mod.rs create mode 100644 frame/session/src/migrations/v1.rs diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 8ca713b1bbf61..6d9d81f385176 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -22,6 +22,7 @@ mod mock; +use sp_runtime::traits::{One, StaticLookup}; use sp_std::{prelude::*, vec}; use frame_benchmarking::benchmarks; @@ -30,12 +31,11 @@ use frame_support::{ traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; -use pallet_session::{historical::Module as Historical, Pallet as Session, *}; +use pallet_session::{historical::Pallet as Historical, Pallet as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, RewardDestination, }; -use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 0801b2aca1701..a3e64f4f9efa4 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -26,62 +26,74 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. -use super::{Pallet as SessionModule, SessionIndex}; +pub mod offchain; +pub mod onchain; +mod shared; + use codec::{Decode, Encode}; -use frame_support::{ - decl_module, decl_storage, print, - traits::{ValidatorSet, ValidatorSetWithIdentification}, - Parameter, -}; use sp_runtime::{ traits::{Convert, OpaqueKeys}, KeyTypeId, }; use sp_session::{MembershipProof, ValidatorCount}; +use sp_staking::SessionIndex; use sp_std::prelude::*; use sp_trie::{ trie_types::{TrieDB, TrieDBMut}, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; -pub mod offchain; -pub mod onchain; -mod shared; +use frame_support::{ + print, + traits::{KeyOwnerProofSystem, StorageVersion, ValidatorSet, ValidatorSetWithIdentification}, + Parameter, +}; -/// Config necessary for the historical module. -pub trait Config: super::Config { - /// Full identification of the validator. - type FullIdentification: Parameter; - - /// A conversion from validator ID to full identification. 
- /// - /// This should contain any references to economic actors associated with the - /// validator, since they may be outdated by the time this is queried from a - /// historical trie. - /// - /// It must return the identification for the current session index. - type FullIdentificationOf: Convert>; -} +use crate::{self as pallet_session, Pallet as Session}; + +pub use pallet::*; -decl_storage! { - trait Store for Module as Session { - /// Mapping from historical session indices to session-data root hash and validator count. - HistoricalSessions get(fn historical_root): - map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; - /// The range of historical sessions we store. [first, last) - StoredRange: Option<(SessionIndex, SessionIndex)>; - /// Deprecated. - CachedObsolete: - map hasher(twox_64_concat) SessionIndex - => Option>; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + /// Config necessary for the historical pallet. + #[pallet::config] + pub trait Config: pallet_session::Config + frame_system::Config { + /// Full identification of the validator. + type FullIdentification: Parameter; + + /// A conversion from validator ID to full identification. + /// + /// This should contain any references to economic actors associated with the + /// validator, since they may be outdated by the time this is queried from a + /// historical trie. + /// + /// It must return the identification for the current session index. + type FullIdentificationOf: Convert>; } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + /// Mapping from historical session indices to session-data root hash and validator count. + #[pallet::storage] + #[pallet::getter(fn historical_root)] + pub type HistoricalSessions = + StorageMap<_, Twox64Concat, SessionIndex, (T::Hash, ValidatorCount), OptionQuery>; + + /// The range of historical sessions we store. [first, last) + #[pallet::storage] + pub type StoredRange = StorageValue<_, (SessionIndex, SessionIndex), OptionQuery>; } -impl Module { +impl Pallet { /// Prune historical stored session roots up to (but not including) /// `up_to`. pub fn prune_up_to(up_to: SessionIndex) { @@ -109,7 +121,7 @@ impl Module { } } -impl ValidatorSet for Module { +impl ValidatorSet for Pallet { type ValidatorId = T::ValidatorId; type ValidatorIdOf = T::ValidatorIdOf; @@ -122,7 +134,7 @@ impl ValidatorSet for Module { } } -impl ValidatorSetWithIdentification for Module { +impl ValidatorSetWithIdentification for Pallet { type Identification = T::FullIdentification; type IdentificationOf = T::FullIdentificationOf; } @@ -130,7 +142,7 @@ impl ValidatorSetWithIdentification for Module { /// Specialization of the crate-level `SessionManager` which returns the set of full identification /// when creating a new session. pub trait SessionManager: - crate::SessionManager + pallet_session::SessionManager { /// If there was a validator set change, its returns the set of new validators along with their /// full identifications. 
@@ -150,7 +162,7 @@ pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); impl> NoteHistoricalRoot { fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option> { - StoredRange::mutate(|range| { + >::mutate(|range| { range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1; }); @@ -183,7 +195,7 @@ impl> NoteHi } } -impl crate::SessionManager for NoteHistoricalRoot +impl pallet_session::SessionManager for NoteHistoricalRoot where I: SessionManager, { @@ -207,7 +219,7 @@ where /// A tuple of the validator's ID and their full identification. pub type IdentificationTuple = - (::ValidatorId, ::FullIdentification); + (::ValidatorId, ::FullIdentification); /// A trie instance for checking and generating proofs. pub struct ProvingTrie { @@ -227,7 +239,7 @@ impl ProvingTrie { let mut trie = TrieDBMut::new(&mut db, &mut root); for (i, (validator, full_id)) in validators.into_iter().enumerate() { let i = i as u32; - let keys = match >::load_keys(&validator) { + let keys = match >::load_keys(&validator) { None => continue, Some(k) => k, }; @@ -304,15 +316,13 @@ impl ProvingTrie { } } -impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> - for Module -{ +impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet { type Proof = MembershipProof; type IdentificationTuple = IdentificationTuple; fn prove(key: (KeyTypeId, D)) -> Option { - let session = >::current_index(); - let validators = >::validators() + let session = >::current_index(); + let validators = >::validators() .into_iter() .filter_map(|validator| { T::FullIdentificationOf::convert(validator.clone()) @@ -335,10 +345,10 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyT fn check_proof(key: (KeyTypeId, D), proof: Self::Proof) -> Option> { let (id, data) = key; - if proof.session == >::current_index() { - >::key_owner(id, data.as_ref()).and_then(|owner| { + if proof.session == >::current_index() { + >::key_owner(id, data.as_ref()).and_then(|owner| { T::FullIdentificationOf::convert(owner.clone()).and_then(move |id| { - let count = >::validators().len() as ValidatorCount; + let count = >::validators().len() as ValidatorCount; if count != proof.validator_count { return None @@ -374,7 +384,7 @@ pub(crate) mod tests { BasicExternalities, }; - type Historical = Module; + type Historical = Pallet; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -386,7 +396,9 @@ pub(crate) mod tests { frame_system::Pallet::::inc_providers(k); } }); - crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); + pallet_session::GenesisConfig:: { keys } + .assimilate_storage(&mut t) + .unwrap(); sp_io::TestExternalities::new(t) } @@ -436,27 +448,27 @@ pub(crate) mod tests { Session::on_initialize(i); } - assert_eq!(StoredRange::get(), Some((0, 100))); + assert_eq!(>::get(), Some((0, 100))); for i in 0..100 { assert!(Historical::historical_root(i).is_some()) } Historical::prune_up_to(10); - assert_eq!(StoredRange::get(), Some((10, 100))); + assert_eq!(>::get(), Some((10, 100))); Historical::prune_up_to(9); - assert_eq!(StoredRange::get(), Some((10, 100))); + assert_eq!(>::get(), Some((10, 100))); for i in 10..100 { assert!(Historical::historical_root(i).is_some()) } Historical::prune_up_to(99); - assert_eq!(StoredRange::get(), Some((99, 100))); + assert_eq!(>::get(), Some((99, 100))); Historical::prune_up_to(100); - assert_eq!(StoredRange::get(), None); + assert_eq!(>::get(), None); for i in 99..199u64 { 
set_next_validators(vec![i]); @@ -466,14 +478,14 @@ pub(crate) mod tests { Session::on_initialize(i); } - assert_eq!(StoredRange::get(), Some((100, 200))); + assert_eq!(>::get(), Some((100, 200))); for i in 100..200 { assert!(Historical::historical_root(i).is_some()) } Historical::prune_up_to(9999); - assert_eq!(StoredRange::get(), None); + assert_eq!(>::get(), None); for i in 100..200 { assert!(Historical::historical_root(i).is_none()) diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index b646ecc2764f7..0b292b57658d0 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -140,7 +140,7 @@ pub fn keep_newest(n_to_keep: usize) { mod tests { use super::*; use crate::{ - historical::{onchain, Module}, + historical::{onchain, Pallet}, mock::{force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS}, }; @@ -156,7 +156,7 @@ mod tests { BasicExternalities, }; - type Historical = Module; + type Historical = Pallet; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 6779285ee3187..2fd34365705bb 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -108,6 +108,7 @@ #[cfg(feature = "historical")] pub mod historical; +pub mod migrations; #[cfg(test)] mod mock; #[cfg(test)] diff --git a/frame/session/src/migrations/mod.rs b/frame/session/src/migrations/mod.rs new file mode 100644 index 0000000000000..ccc5ee3c2e525 --- /dev/null +++ b/frame/session/src/migrations/mod.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Version 1. +/// +/// In version 0 session historical pallet uses `Session` for storage module prefix. +/// In version 1 it uses its name as configured in `construct_runtime`. +/// This migration moves session historical pallet storages from old prefix to new prefix. +#[cfg(feature = "historical")] +pub mod v1; diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs new file mode 100644 index 0000000000000..1de199fe7bedd --- /dev/null +++ b/frame/session/src/migrations/v1.rs @@ -0,0 +1,194 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use sp_io::hashing::twox_128;
+use sp_std::str;
+
+use frame_support::{
+	storage::{generator::StorageValue, StoragePrefixedMap},
+	traits::{
+		Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
+		STORAGE_VERSION_STORAGE_KEY_POSTFIX,
+	},
+	weights::Weight,
+};
+
+use crate::historical as pallet_session_historical;
+
+const OLD_PREFIX: &str = "Session";
+
+/// Migrate the entire storage of this pallet to a new prefix.
+///
+/// This new prefix must be the same as the one set in construct_runtime.
+///
+/// The migration will look into the storage version in order not to trigger a migration on an up
+/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
+/// migration.
+pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
+) -> Weight {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	if new_pallet_name == OLD_PREFIX {
+		log::info!(
+			target: "runtime::session_historical",
+			"New pallet name is equal to the old prefix. No migration needs to be done.",
+		);
+		return 0
+	}
+
+	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
+	log::info!(
+		target: "runtime::session_historical",
+		"Running migration to v1 for session_historical with storage version {:?}",
+		on_chain_storage_version,
+	);
+
+	if on_chain_storage_version < 1 {
+		let storage_prefix = pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		let storage_prefix = pallet_session_historical::StoredRange::<T>::storage_prefix();
+		frame_support::storage::migration::move_storage_from_pallet(
+			storage_prefix,
+			OLD_PREFIX.as_bytes(),
+			new_pallet_name.as_bytes(),
+		);
+		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
+
+		StorageVersion::new(1).put::<P>();
+		<T as frame_system::Config>::BlockWeights::get().max_block
+	} else {
+		log::warn!(
+			target: "runtime::session_historical",
+			"Attempted to apply migration to v1 but failed because storage version is {:?}",
+			on_chain_storage_version,
+		);
+		0
+	}
+}
+
+/// Some checks prior to migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn pre_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
+	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);
+
+	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|key| Ok(key.to_vec()),
+	);
+
+	// Ensure nothing except the storage_version_key is stored in the new prefix.
+	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));
+
+	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
+}
+
+/// Some checks for after migration. This can be linked to
+/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing.
+///
+/// Panics if anything goes wrong.
+pub fn post_migrate<
+	T: pallet_session_historical::Config,
+	P: GetStorageVersion + PalletInfoAccess,
+>() {
+	let new_pallet_name = <P as PalletInfoAccess>::name();
+
+	let storage_prefix_historical_sessions =
+		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
+	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
+
+	log_migration(
+		"post-migration",
+		storage_prefix_historical_sessions,
+		OLD_PREFIX,
+		new_pallet_name,
+	);
+	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);
+
+	if new_pallet_name == OLD_PREFIX {
+		return
+	}
+
+	// Assert that no `HistoricalSessions` and `StoredRange` storages remains at the old prefix.
+	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
+	let old_historical_sessions_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_historical_sessions)[..]].concat();
+	let old_historical_sessions_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_historical_sessions_key.to_vec(),
+		old_historical_sessions_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_historical_sessions_key_iter.count(), 0);
+
+	let old_stored_range_key =
+		[&old_pallet_prefix, &twox_128(storage_prefix_stored_range)[..]].concat();
+	let old_stored_range_key_iter = frame_support::storage::KeyPrefixIterator::new(
+		old_stored_range_key.to_vec(),
+		old_stored_range_key.to_vec(),
+		|_| Ok(()),
+	);
+	assert_eq!(old_stored_range_key_iter.count(), 0);
+
+	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
+	// moved to the new prefix.
+	// NOTE: storage_version_key is already in the new prefix.
+	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
+	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
+		new_pallet_prefix.to_vec(),
+		new_pallet_prefix.to_vec(),
+		|_| Ok(()),
+	);
+	assert!(new_pallet_prefix_iter.count() >= 1);
+
+	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
+}
+
+fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
+	log::info!(
+		target: "runtime::session_historical",
+		"{} prefix of storage '{}': '{}' ==> '{}'",
+		stage,
+		str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>"),
+		old_pallet_name,
+		new_pallet_name,
+	);
+}
diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs
index 308ed7c5e5487..cc0606edf499d 100644
--- a/frame/session/src/tests.rs
+++ b/frame/session/src/tests.rs
@@ -453,3 +453,30 @@ fn upgrade_keys() {
 		}
 	})
 }
+
+#[cfg(feature = "historical")]
+#[test]
+fn test_migration_v1() {
+	use crate::{
+		historical::{HistoricalSessions, StoredRange},
+		mock::Historical,
+	};
+	use frame_support::traits::PalletInfoAccess;
+
+	new_test_ext().execute_with(|| {
+		assert!(<HistoricalSessions<Test>>::iter_values().count() > 0);
+		assert!(<StoredRange<Test>>::exists());
+
+		let old_pallet = "Session";
+		let new_pallet = <Historical as PalletInfoAccess>::name();
+		frame_support::storage::migration::move_pallet(
+			new_pallet.as_bytes(),
+			old_pallet.as_bytes(),
+		);
+		StorageVersion::new(0).put::<Historical>();
+
+		crate::migrations::v1::pre_migrate::<Test, Historical>();
+		crate::migrations::v1::migrate::<Test, Historical>();
+		crate::migrations::v1::post_migrate::<Test, Historical>();
+	});
+}
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs
index 95d397359f8d6..e5a3e49033934 100644
--- a/frame/staking/src/mock.rs
+++ b/frame/staking/src/mock.rs
@@ -96,6 +96,7 @@ frame_support::construct_runtime!(
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
 		Staking: pallet_staking::{Pallet, Call, Config<T>, Storage, Event<T>},
 		Session: pallet_session::{Pallet, Call, Storage, Event, Config},
+		Historical: pallet_session::historical::{Pallet, Storage},
 		BagsList: pallet_bags_list::{Pallet, Call, Storage, Event<T>},
 	}
 );

From 0baa586a57803dffa09ce1051ef54a63ad6f40d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Wed, 17 Nov 2021 10:53:31 +0100
Subject: [PATCH 108/162] authority-discovery: Support multiple authority ids
 per peer id (#10259)

* authority-discovery: Support multiple authority ids per peer id

A peer id can be mapped to multiple authority ids, because an authority
id is a session key that could be changed every session. Before this PR
the internal authority discovery cache assumed that each authority id
can only be mapped to one peer id. However, this isn't true since we
changed the default implementation of the authority discovery to
combine the current and next session authorities.
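For illustration, a minimal sketch of the new cache semantics (it mirrors the
`adding_two_authority_ids_for_the_same_peer_id` test added further down; the
two generated keys stand in for one node's current and next session keys):

	// Both session keys advertise the same network identity, so the cache
	// must behave like a bi-directional multimap instead of a 1:1 map.
	let mut cache = AddrCache::new();

	let peer_id = PeerId::random();
	let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into()));

	let current_key = AuthorityPair::generate().0.public();
	let next_key = AuthorityPair::generate().0.public();

	cache.insert(current_key.clone(), vec![addr.clone()]);
	cache.insert(next_key.clone(), vec![addr]);

	// A single `PeerId` now resolves to every authority id it has used.
	assert_eq!(
		cache.get_authority_ids_by_peer_id(&peer_id),
		Some(&HashSet::from([current_key, next_key])),
	);

Internally this is a `HashMap<PeerId, HashSet<AuthorityId>>` kept in sync with
the existing authority-id-to-addresses map on every `insert`, as the
`addr_cache.rs` diff below shows.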
* Review feedback * Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Andronik Ordian * Early return on no peer ids * Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Pierre Krieger * Update types in comment * FMT * Add warning * Update client/authority-discovery/src/worker/addr_cache.rs Co-authored-by: Andronik Ordian * Feedback Co-authored-by: Andronik Ordian Co-authored-by: Pierre Krieger --- client/authority-discovery/src/lib.rs | 11 +- client/authority-discovery/src/service.rs | 11 +- client/authority-discovery/src/tests.rs | 8 +- client/authority-discovery/src/worker.rs | 8 +- .../src/worker/addr_cache.rs | 242 +++++++++++------- .../authority-discovery/src/worker/tests.rs | 5 +- 6 files changed, 173 insertions(+), 112 deletions(-) diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 800f683aa0aef..1bbb9f38796c2 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -18,6 +18,7 @@ #![warn(missing_docs)] #![recursion_limit = "1024"] + //! Substrate authority discovery. //! //! This crate enables Substrate authorities to discover and directly connect to @@ -31,7 +32,7 @@ pub use crate::{ worker::{NetworkProvider, Role, Worker}, }; -use std::{sync::Arc, time::Duration}; +use std::{collections::HashSet, sync::Arc, time::Duration}; use futures::{ channel::{mpsc, oneshot}, @@ -58,11 +59,13 @@ pub struct WorkerConfig { /// /// By default this is set to 1 hour. pub max_publish_interval: Duration, + /// Interval at which the keystore is queried. If the keys have changed, unconditionally /// re-publish its addresses on the DHT. /// /// By default this is set to 1 minute. pub keystore_refresh_interval: Duration, + /// The maximum interval in which the node will query the DHT for new entries. /// /// By default this is set to 10 minutes. @@ -156,7 +159,7 @@ where /// Message send from the [`Service`] to the [`Worker`]. pub(crate) enum ServicetoWorkerMsg { /// See [`Service::get_addresses_by_authority_id`]. - GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), - /// See [`Service::get_authority_id_by_peer_id`]. - GetAuthorityIdByPeerId(PeerId, oneshot::Sender>), + GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), + /// See [`Service::get_authority_ids_by_peer_id`]. + GetAuthorityIdsByPeerId(PeerId, oneshot::Sender>>), } diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index 2e5ae66e4dd4a..9b59a4ec8647f 100644 --- a/client/authority-discovery/src/service.rs +++ b/client/authority-discovery/src/service.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::fmt::Debug; +use std::{collections::HashSet, fmt::Debug}; use crate::ServicetoWorkerMsg; @@ -62,7 +62,7 @@ impl Service { pub async fn get_addresses_by_authority_id( &mut self, authority: AuthorityId, - ) -> Option> { + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker @@ -78,11 +78,14 @@ impl Service { /// /// Returns `None` if no entry was present or connection to the /// [`crate::Worker`] failed. 
- pub async fn get_authority_id_by_peer_id(&mut self, peer_id: PeerId) -> Option { + pub async fn get_authority_ids_by_peer_id( + &mut self, + peer_id: PeerId, + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker - .send(ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, tx)) + .send(ServicetoWorkerMsg::GetAuthorityIdsByPeerId(peer_id, tx)) .await .ok()?; diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 3784b4c834266..cef91445064ca 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -29,7 +29,7 @@ use libp2p::core::{ multiaddr::{Multiaddr, Protocol}, PeerId, }; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use sp_authority_discovery::AuthorityId; use sp_core::crypto::key_types; @@ -73,12 +73,12 @@ fn get_addresses_and_authority_id() { pool.run_until(async { assert_eq!( - Some(vec![remote_addr]), + Some(HashSet::from([remote_addr])), service.get_addresses_by_authority_id(remote_authority_id.clone()).await, ); assert_eq!( - Some(remote_authority_id), - service.get_authority_id_by_peer_id(remote_peer_id).await, + Some(HashSet::from([remote_authority_id])), + service.get_authority_ids_by_peer_id(remote_peer_id).await, ); }); } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index a689d0bafd262..00021ecbdcb83 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -259,9 +259,9 @@ where self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone), ); }, - ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { + ServicetoWorkerMsg::GetAuthorityIdsByPeerId(peer_id, sender) => { let _ = sender - .send(self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone)); + .send(self.addr_cache.get_authority_ids_by_peer_id(&peer_id).map(Clone::clone)); }, } } @@ -374,7 +374,7 @@ where .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) - .collect(); + .collect::>(); self.addr_cache.retain_ids(&authorities); @@ -548,7 +548,7 @@ where if let Some(metrics) = &self.metrics { metrics .known_authorities_count - .set(self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX)); + .set(self.addr_cache.num_authority_ids().try_into().unwrap_or(std::u64::MAX)); } } Ok(()) diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index e770297f6f3be..d4ba156d5fa19 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -17,79 +17,94 @@ // along with this program. If not, see . use libp2p::core::multiaddr::{Multiaddr, Protocol}; -use std::collections::HashMap; use sc_network::PeerId; use sp_authority_discovery::AuthorityId; +use std::collections::{hash_map::Entry, HashMap, HashSet}; -/// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. +/// Cache for [`AuthorityId`] -> [`HashSet`] and [`PeerId`] -> [`HashSet`] +/// mappings. pub(super) struct AddrCache { - // The addresses found in `authority_id_to_addresses` are guaranteed to always match - // the peerids found in `peer_id_to_authority_id`. In other words, these two hashmaps - // are similar to a bi-directional map. 
- authority_id_to_addresses: HashMap>, - peer_id_to_authority_id: HashMap, + /// The addresses found in `authority_id_to_addresses` are guaranteed to always match + /// the peerids found in `peer_id_to_authority_ids`. In other words, these two hashmaps + /// are similar to a bi-directional map. + /// + /// Since we may store the mapping across several sessions, a single + /// `PeerId` might correspond to multiple `AuthorityId`s. However, + /// it's not expected that a single `AuthorityId` can have multiple `PeerId`s. + authority_id_to_addresses: HashMap>, + peer_id_to_authority_ids: HashMap>, } impl AddrCache { pub fn new() -> Self { AddrCache { authority_id_to_addresses: HashMap::new(), - peer_id_to_authority_id: HashMap::new(), + peer_id_to_authority_ids: HashMap::new(), } } /// Inserts the given [`AuthorityId`] and [`Vec`] pair for future lookups by /// [`AuthorityId`] or [`PeerId`]. - pub fn insert(&mut self, authority_id: AuthorityId, mut addresses: Vec) { - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + pub fn insert(&mut self, authority_id: AuthorityId, addresses: Vec) { + let addresses = addresses.into_iter().collect::>(); + let peer_ids = addresses_to_peer_ids(&addresses); + + if peer_ids.is_empty() { + log::debug!( + target: super::LOG_TARGET, + "Authority({:?}) provides no addresses or addresses without peer ids. Adresses: {:?}", + authority_id, + addresses, + ); - // Insert into `self.peer_id_to_authority_id`. - let peer_ids = addresses - .iter() - .map(|a| peer_id_from_multiaddr(a)) - .filter_map(|peer_id| peer_id); - for peer_id in peer_ids.clone() { - let former_auth = - match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { - Some(a) if a != authority_id => a, - _ => continue, - }; - - // PeerId was associated to a different authority id before. - // Remove corresponding authority from `self.authority_id_to_addresses`. - let former_auth_addrs = match self.authority_id_to_addresses.get_mut(&former_auth) { - Some(a) => a, - None => { - debug_assert!(false); - continue - }, - }; - former_auth_addrs.retain(|a| peer_id_from_multiaddr(a).map_or(true, |p| p != peer_id)); + return + } else if peer_ids.len() > 1 { + log::warn!( + target: super::LOG_TARGET, + "Authority({:?}) can be reached through multiple peer ids: {:?}", + authority_id, + peer_ids + ); } - // Insert into `self.authority_id_to_addresses`. - for former_addr in self - .authority_id_to_addresses - .insert(authority_id.clone(), addresses.clone()) - .unwrap_or_default() - { - // Must remove from `self.peer_id_to_authority_id` any PeerId formerly associated - // to that authority but that can't be found in its new addresses. - - let peer_id = match peer_id_from_multiaddr(&former_addr) { - Some(p) => p, - None => continue, - }; + let old_addresses = self.authority_id_to_addresses.insert(authority_id.clone(), addresses); + let old_peer_ids = addresses_to_peer_ids(&old_addresses.unwrap_or_default()); - if !peer_ids.clone().any(|p| p == peer_id) { - self.peer_id_to_authority_id.remove(&peer_id); + // Add the new peer ids + peer_ids.difference(&old_peer_ids).for_each(|new_peer_id| { + self.peer_id_to_authority_ids + .entry(*new_peer_id) + .or_default() + .insert(authority_id.clone()); + }); + + // Remove the old peer ids + self.remove_authority_id_from_peer_ids(&authority_id, old_peer_ids.difference(&peer_ids)); + } + + /// Remove the given `authority_id` from the `peer_id` to `authority_ids` mapping. 
+ /// + /// If a `peer_id` doesn't have any `authority_id` assigned anymore, it is removed. + fn remove_authority_id_from_peer_ids<'a>( + &mut self, + authority_id: &AuthorityId, + peer_ids: impl Iterator, + ) { + peer_ids.for_each(|peer_id| { + if let Entry::Occupied(mut e) = self.peer_id_to_authority_ids.entry(*peer_id) { + e.get_mut().remove(authority_id); + + // If there are no more entries, remove the peer id. + if e.get().is_empty() { + e.remove(); + } } - } + }) } /// Returns the number of authority IDs in the cache. - pub fn num_ids(&self) -> usize { + pub fn num_authority_ids(&self) -> usize { self.authority_id_to_addresses.len() } @@ -97,18 +112,21 @@ impl AddrCache { pub fn get_addresses_by_authority_id( &self, authority_id: &AuthorityId, - ) -> Option<&Vec> { - self.authority_id_to_addresses.get(&authority_id) + ) -> Option<&HashSet> { + self.authority_id_to_addresses.get(authority_id) } - /// Returns the [`AuthorityId`] for the given [`PeerId`]. - pub fn get_authority_id_by_peer_id(&self, peer_id: &PeerId) -> Option<&AuthorityId> { - self.peer_id_to_authority_id.get(peer_id) + /// Returns the [`AuthorityId`]s for the given [`PeerId`]. + /// + /// As the authority id can change between sessions, one [`PeerId`] can be mapped to + /// multiple authority ids. + pub fn get_authority_ids_by_peer_id(&self, peer_id: &PeerId) -> Option<&HashSet> { + self.peer_id_to_authority_ids.get(peer_id) } /// Removes all [`PeerId`]s and [`Multiaddr`]s from the cache that are not related to the given /// [`AuthorityId`]s. - pub fn retain_ids(&mut self, authority_ids: &Vec) { + pub fn retain_ids(&mut self, authority_ids: &[AuthorityId]) { // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. let authority_ids_to_remove = self .authority_id_to_addresses @@ -120,19 +138,18 @@ impl AddrCache { for authority_id_to_remove in authority_ids_to_remove { // Remove other entries from `self.authority_id_to_addresses`. - let addresses = self.authority_id_to_addresses.remove(&authority_id_to_remove); - - // Remove other entries from `self.peer_id_to_authority_id`. - let peer_ids = addresses - .iter() - .flatten() - .map(|a| peer_id_from_multiaddr(a)) - .filter_map(|peer_id| peer_id); - for peer_id in peer_ids { - if let Some(id) = self.peer_id_to_authority_id.remove(&peer_id) { - debug_assert_eq!(authority_id_to_remove, id); - } - } + let addresses = if let Some(addresses) = + self.authority_id_to_addresses.remove(&authority_id_to_remove) + { + addresses + } else { + continue + }; + + self.remove_authority_id_from_peer_ids( + &authority_id_to_remove, + addresses_to_peer_ids(&addresses).iter(), + ); } } } @@ -147,6 +164,13 @@ fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { }) } +fn addresses_to_peer_ids(addresses: &HashSet) -> HashSet { + addresses + .iter() + .filter_map(|a| peer_id_from_multiaddr(a)) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; @@ -226,27 +250,27 @@ mod tests { cache.insert(third.0.clone(), vec![third.1.clone()]); assert_eq!( - Some(&vec![third.1.clone()]), + Some(&HashSet::from([third.1.clone()])), cache.get_addresses_by_authority_id(&third.0), - "Expect `get_addresses_by_authority_id` to return addresses of third authority." + "Expect `get_addresses_by_authority_id` to return addresses of third authority.", ); assert_eq!( - Some(&third.0), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of third authority." 
+ Some(&HashSet::from([third.0.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `AuthorityId` of third authority.", ); - cache.retain_ids(&vec![first.0, second.0]); + cache.retain_ids(&vec![first.0.clone(), second.0]); assert_eq!( None, cache.get_addresses_by_authority_id(&third.0), - "Expect `get_addresses_by_authority_id` to not return `None` for third authority." + "Expect `get_addresses_by_authority_id` to not return `None` for third authority.", ); assert_eq!( None, - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), - "Expect `get_authority_id_by_peer_id` to return `None` for third authority." + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + "Expect `get_authority_id_by_peer_id` to return `None` for third authority.", ); TestResult::passed() @@ -282,44 +306,47 @@ mod tests { assert_eq!( None, - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) ); cache.insert(authority2.clone(), vec![multiaddr2.clone()]); assert_eq!( - Some(&authority2), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) ); assert_eq!( - Some(&authority1), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + Some(&HashSet::from([authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); - assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 2); + assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 3); cache.insert(authority2.clone(), vec![multiaddr2.clone(), multiaddr3.clone()]); assert_eq!( - Some(&authority2), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&HashSet::from([authority2.clone(), authority1.clone()])), + cache.get_authority_ids_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) ); assert_eq!( - Some(&authority2), - cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + &HashSet::from([multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()]), + cache.get_addresses_by_authority_id(&authority1).unwrap(), ); - assert!(cache.get_addresses_by_authority_id(&authority1).unwrap().is_empty()); TestResult::passed() } @@ -328,4 +355,31 @@ mod tests { 
.max_tests(10) .quickcheck(property as fn(_, _, _, _, _) -> TestResult) } + + /// As the runtime gives us the current + next authority ids, it can happen that some + /// authority changed its session keys. Changing the sessions keys leads to having two + /// authority ids that map to the same `PeerId` & addresses. + #[test] + fn adding_two_authority_ids_for_the_same_peer_id() { + let mut addr_cache = AddrCache::new(); + + let peer_id = PeerId::random(); + let addr = Multiaddr::empty().with(Protocol::P2p(peer_id.into())); + + let authority_id0 = AuthorityPair::generate().0.public(); + let authority_id1 = AuthorityPair::generate().0.public(); + + addr_cache.insert(authority_id0.clone(), vec![addr.clone()]); + addr_cache.insert(authority_id1.clone(), vec![addr.clone()]); + + assert_eq!(2, addr_cache.num_authority_ids()); + assert_eq!( + &HashSet::from([addr.clone()]), + addr_cache.get_addresses_by_authority_id(&authority_id0).unwrap() + ); + assert_eq!( + &HashSet::from([addr]), + addr_cache.get_addresses_by_authority_id(&authority_id1).unwrap() + ); + } } diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 3c1610256f5bc..130aea71fdfb0 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -19,6 +19,7 @@ use crate::worker::schema; use std::{ + collections::HashSet, sync::{Arc, Mutex}, task::Poll, }; @@ -469,7 +470,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { .send(ServicetoWorkerMsg::GetAddressesByAuthorityId(remote_public_key, sender)) .await .expect("Channel has capacity of 1."); - assert_eq!(Some(vec![remote_multiaddr]), addresses.await.unwrap()); + assert_eq!(Some(HashSet::from([remote_multiaddr])), addresses.await.unwrap()); }); } @@ -562,7 +563,7 @@ fn do_not_cache_addresses_without_peer_id() { local_worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); assert_eq!( - Some(&vec![multiaddr_with_peer_id]), + Some(&HashSet::from([multiaddr_with_peer_id])), local_worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()), "Expect worker to only cache `Multiaddr`s with `PeerId`s.", ); From e75d8e9a51b3d2b793018bd4d9ea772435a39c64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 17 Nov 2021 11:35:21 +0100 Subject: [PATCH 109/162] Offchain testing: Fix reading response (#10294) --- primitives/core/src/offchain/testing.rs | 2 +- primitives/runtime/src/offchain/http.rs | 32 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 26bcdb66de836..29b9edb03deb5 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -341,7 +341,7 @@ impl offchain::Externalities for TestOffchainExt { Ok(0) } else { let read = std::cmp::min(buffer.len(), response[req.read..].len()); - buffer[0..read].copy_from_slice(&response[req.read..read]); + buffer[0..read].copy_from_slice(&response[req.read..req.read + read]); req.read += read; Ok(read) } diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 469f2fb5aff3a..a8c82e616a476 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -535,6 +535,38 @@ mod tests { }) } + #[test] + fn should_send_huge_response() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = TestExternalities::default(); + 
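+		// A 5923 byte body cannot be returned in one chunk (assuming the
+		// reader's internal buffer is smaller than the body), so this test
+		// forces several reads. With the old mock logic above, which sliced
+		// `&response[req.read..read]` instead of
+		// `&response[req.read..req.read + read]`, every read after the first
+		// chunk would index the wrong range.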
t.register_extension(OffchainWorkerExt::new(offchain)); + + t.execute_with(|| { + let request: Request = Request::get("http://localhost:1234"); + let pending = request.add_header("X-Auth", "hunter2").send().unwrap(); + // make sure it's sent correctly + state.write().fulfill_pending_request( + 0, + testing::PendingRequest { + method: "GET".into(), + uri: "http://localhost:1234".into(), + headers: vec![("X-Auth".into(), "hunter2".into())], + sent: true, + ..Default::default() + }, + vec![0; 5923], + None, + ); + + // wait + let response = pending.wait().unwrap(); + + let body = response.body(); + assert_eq!(body.clone().collect::>(), vec![0; 5923]); + assert_eq!(body.error(), &None); + }) + } + #[test] fn should_send_a_post_request() { let (offchain, state) = testing::TestOffchainExt::new(); From 6d61f48427ae40b11dbebc2b548818773c3c7f8e Mon Sep 17 00:00:00 2001 From: Seun Lanlege Date: Wed, 17 Nov 2021 13:46:10 +0100 Subject: [PATCH 110/162] Check if BEEFY authority is in current set (#10281) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * check if BEEFY authority is in current set * Update client/beefy/src/round.rs Co-authored-by: Bastian Köcher * Update client/beefy/src/round.rs * Update client/beefy/src/round.rs Co-authored-by: Andreas Doerr * remove stray semi Co-authored-by: Bastian Köcher Co-authored-by: Andreas Doerr --- client/beefy/src/round.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 7d443603b364e..51284c9bd2f6e 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -82,7 +82,11 @@ where } pub(crate) fn add_vote(&mut self, round: (H, N), vote: (Public, Signature)) -> bool { - self.rounds.entry(round).or_default().add_vote(vote) + if self.validator_set.validators.iter().any(|id| vote.0 == *id) { + self.rounds.entry(round).or_default().add_vote(vote) + } else { + false + } } pub(crate) fn is_done(&self, round: &(H, N)) -> bool { From 05a6abb9369426e862fc09fa51c544f1d03d5b71 Mon Sep 17 00:00:00 2001 From: wigy <1888808+wigy-opensource-developer@users.noreply.github.com> Date: Wed, 17 Nov 2021 15:11:02 +0100 Subject: [PATCH 111/162] Offence implementations can disable offenders independently from slashing (#10201) * Offence implementations can disable offenders independently from slashing * Fix build on CI * Run cargo fmt * Fixes based on review comments * Make parameter naming consistent * Fix migration and some English * Fix migration - again * cargo fmt * Cover 2 new cases with a test --- frame/offences/src/lib.rs | 1 + frame/offences/src/migration.rs | 9 +++- frame/offences/src/mock.rs | 3 +- frame/staking/src/mock.rs | 8 ++-- frame/staking/src/pallet/impls.rs | 4 +- frame/staking/src/slashing.rs | 70 +++++++++++++++---------------- frame/staking/src/tests.rs | 48 +++++++++++++++++++-- primitives/staking/src/offence.rs | 32 ++++++++++++++ 8 files changed, 129 insertions(+), 46 deletions(-) diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index ddae73e280d57..c230eac88dcee 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -150,6 +150,7 @@ where &concurrent_offenders, &slash_perbill, offence.session_index(), + offence.disable_strategy(), ); // Deposit the event. 
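In outline, the strategy type this patch threads through `on_offence` looks
like the sketch below (variant names and the `WhenSlashed` default are taken
from the handler and migration code in this patch; the authoritative
definition is the `primitives/staking/src/offence.rs` change listed in the
diffstat above):

	/// What should be done with the offender when an offence is reported.
	pub enum DisableStrategy {
		/// Independently of slashing, this offence will not disable the offender.
		Never,
		/// Only disable the offender when it is also slashed.
		WhenSlashed,
		/// Independently of slashing, this offence will always disable the offender.
		Always,
	}

	pub trait Offence<Offender> {
		// ... existing items ...

		/// Each offence type picks its own strategy; defaulting to
		/// `WhenSlashed` preserves the previous "disable only while
		/// slashing" behaviour for implementations that don't override it.
		fn disable_strategy(&self) -> DisableStrategy {
			DisableStrategy::WhenSlashed
		}
	}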
diff --git a/frame/offences/src/migration.rs b/frame/offences/src/migration.rs index b6e32cbe69e26..d655f2cec539a 100644 --- a/frame/offences/src/migration.rs +++ b/frame/offences/src/migration.rs @@ -19,7 +19,7 @@ use super::{Config, OffenceDetails, Perbill, SessionIndex}; use frame_support::{ generate_storage_alias, pallet_prelude::ValueQuery, traits::Get, weights::Weight, }; -use sp_staking::offence::OnOffenceHandler; +use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; use sp_std::vec::Vec; /// Type of data stored as a deferred offence @@ -41,7 +41,12 @@ pub fn remove_deferred_storage() -> Weight { let deferred = >::take(); log::info!(target: "runtime::offences", "have {} deferred offences, applying.", deferred.len()); for (offences, perbill, session) in deferred.iter() { - let consumed = T::OnOffenceHandler::on_offence(&offences, &perbill, *session); + let consumed = T::OnOffenceHandler::on_offence( + &offences, + &perbill, + *session, + DisableStrategy::WhenSlashed, + ); weight = weight.saturating_add(consumed); } diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 5e4c94944b6fd..bce51f527abc6 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -36,7 +36,7 @@ use sp_runtime::{ Perbill, }; use sp_staking::{ - offence::{self, Kind, OffenceDetails}, + offence::{self, DisableStrategy, Kind, OffenceDetails}, SessionIndex, }; use std::cell::RefCell; @@ -55,6 +55,7 @@ impl offence::OnOffenceHandler _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, + _disable_strategy: DisableStrategy, ) -> Weight { ON_OFFENCE_PERBILL.with(|f| { *f.borrow_mut() = slash_fraction.to_vec(); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index e5a3e49033934..2b74b0188cff4 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -33,7 +33,7 @@ use sp_runtime::{ testing::{Header, TestXt, UintAuthorityId}, traits::{IdentityLookup, Zero}, }; -use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; +use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; use std::cell::RefCell; pub const INIT_TIMESTAMP: u64 = 30_000; @@ -765,11 +765,12 @@ pub(crate) fn on_offence_in_era( >], slash_fraction: &[Perbill], era: EraIndex, + disable_strategy: DisableStrategy, ) { let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session); + let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); return } else if bonded_era > era { break @@ -781,6 +782,7 @@ pub(crate) fn on_offence_in_era( offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap(), + disable_strategy, ); } else { panic!("cannot slash in era {}", era); @@ -795,7 +797,7 @@ pub(crate) fn on_offence_now( slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now) + on_offence_in_era(offenders, slash_fraction, now, DisableStrategy::WhenSlashed) } pub(crate) fn add_slash(who: &AccountId) { diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 7ca1cb1a4a61b..8d86cfbe6b0d6 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -36,7 +36,7 @@ use sp_runtime::{ Perbill, }; use sp_staking::{ - offence::{OffenceDetails, OnOffenceHandler}, + offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, 
SessionIndex, }; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -1137,6 +1137,7 @@ where >], slash_fraction: &[Perbill], slash_session: SessionIndex, + disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); let mut consumed_weight: Weight = 0; @@ -1206,6 +1207,7 @@ where window_start, now: active_era, reward_proportion, + disable_strategy, }); if let Some(mut unapplied) = unapplied { diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 414c21aa347f0..066142d8ecc24 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -63,6 +63,7 @@ use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, RuntimeDebug, }; +use sp_staking::offence::DisableStrategy; use sp_std::vec::Vec; /// The proportion of the slashing reward to be paid out on the first slashing detection. @@ -213,6 +214,8 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { /// The maximum percentage of a slash that ever gets paid out. /// This is f_inf in the paper. pub(crate) reward_proportion: Perbill, + /// When to disable offenders. + pub(crate) disable_strategy: DisableStrategy, } /// Computes a slash of a validator and nominators. It returns an unapplied @@ -224,15 +227,12 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { pub(crate) fn compute_slash( params: SlashParams, ) -> Option>> { - let SlashParams { stash, slash, exposure, slash_era, window_start, now, reward_proportion } = - params.clone(); - let mut reward_payout = Zero::zero(); let mut val_slashed = Zero::zero(); // is the slash amount here a maximum for the era? - let own_slash = slash * exposure.own; - if slash * exposure.total == Zero::zero() { + let own_slash = params.slash * params.exposure.own; + if params.slash * params.exposure.total == Zero::zero() { // kick out the validator even if they won't be slashed, // as long as the misbehavior is from their most recent slashing span. kick_out_if_recent::(params); @@ -240,13 +240,17 @@ pub(crate) fn compute_slash( } let (prior_slash_p, _era_slash) = - as Store>::ValidatorSlashInEra::get(&slash_era, stash) + as Store>::ValidatorSlashInEra::get(¶ms.slash_era, params.stash) .unwrap_or((Perbill::zero(), Zero::zero())); // compare slash proportions rather than slash values to avoid issues due to rounding // error. - if slash.deconstruct() > prior_slash_p.deconstruct() { - as Store>::ValidatorSlashInEra::insert(&slash_era, stash, &(slash, own_slash)); + if params.slash.deconstruct() > prior_slash_p.deconstruct() { + as Store>::ValidatorSlashInEra::insert( + ¶ms.slash_era, + params.stash, + &(params.slash, own_slash), + ); } else { // we slash based on the max in era - this new event is not the max, // so neither the validator or any nominators will need an update. @@ -261,14 +265,14 @@ pub(crate) fn compute_slash( // apply slash to validator. { let mut spans = fetch_spans::( - stash, - window_start, + params.stash, + params.window_start, &mut reward_payout, &mut val_slashed, - reward_proportion, + params.reward_proportion, ); - let target_span = spans.compare_and_update_span_slash(slash_era, own_slash); + let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); if target_span == Some(spans.span_index()) { // misbehavior occurred within the current slashing span - take appropriate @@ -276,20 +280,19 @@ pub(crate) fn compute_slash( // chill the validator - it misbehaved in the current span and should // not continue in the next election. also end the slashing span. 
- spans.end_span(now); - >::chill_stash(stash); + spans.end_span(params.now); + >::chill_stash(params.stash); } } - // add the validator to the offenders list and make sure it is disabled for - // the duration of the era - add_offending_validator::(params.stash, true); + let disable_when_slashed = params.disable_strategy != DisableStrategy::Never; + add_offending_validator::(params.stash, disable_when_slashed); let mut nominators_slashed = Vec::new(); - reward_payout += slash_nominators::(params, prior_slash_p, &mut nominators_slashed); + reward_payout += slash_nominators::(params.clone(), prior_slash_p, &mut nominators_slashed); Some(UnappliedSlash { - validator: stash.clone(), + validator: params.stash.clone(), own: val_slashed, others: nominators_slashed, reporters: Vec::new(), @@ -316,9 +319,8 @@ fn kick_out_if_recent(params: SlashParams) { >::chill_stash(params.stash); } - // add the validator to the offenders list but since there's no slash being - // applied there's no need to disable the validator - add_offending_validator::(params.stash, false); + let disable_without_slash = params.disable_strategy == DisableStrategy::Always; + add_offending_validator::(params.stash, disable_without_slash); } /// Add the given validator to the offenders list and optionally disable it. @@ -371,13 +373,10 @@ fn slash_nominators( prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, ) -> BalanceOf { - let SlashParams { stash: _, slash, exposure, slash_era, window_start, now, reward_proportion } = - params; - let mut reward_payout = Zero::zero(); - nominators_slashed.reserve(exposure.others.len()); - for nominator in &exposure.others { + nominators_slashed.reserve(params.exposure.others.len()); + for nominator in ¶ms.exposure.others { let stash = &nominator.who; let mut nom_slashed = Zero::zero(); @@ -385,15 +384,16 @@ fn slash_nominators( // had a new max slash for the era. let era_slash = { let own_slash_prior = prior_slash_p * nominator.value; - let own_slash_by_validator = slash * nominator.value; + let own_slash_by_validator = params.slash * nominator.value; let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); - let mut era_slash = as Store>::NominatorSlashInEra::get(&slash_era, stash) - .unwrap_or_else(|| Zero::zero()); + let mut era_slash = + as Store>::NominatorSlashInEra::get(¶ms.slash_era, stash) + .unwrap_or_else(|| Zero::zero()); era_slash += own_slash_difference; - as Store>::NominatorSlashInEra::insert(&slash_era, stash, &era_slash); + as Store>::NominatorSlashInEra::insert(¶ms.slash_era, stash, &era_slash); era_slash }; @@ -402,18 +402,18 @@ fn slash_nominators( { let mut spans = fetch_spans::( stash, - window_start, + params.window_start, &mut reward_payout, &mut nom_slashed, - reward_proportion, + params.reward_proportion, ); - let target_span = spans.compare_and_update_span_slash(slash_era, era_slash); + let target_span = spans.compare_and_update_span_slash(params.slash_era, era_slash); if target_span == Some(spans.span_index()) { // End the span, but don't chill the nominator. its nomination // on this validator will be ignored in the future. 
- spans.end_span(now); + spans.end_span(params.now); } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 8f13fd7850803..f8f37bed0066c 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -34,7 +34,7 @@ use sp_runtime::{ Perbill, Percent, }; use sp_staking::{ - offence::{OffenceDetails, OnOffenceHandler}, + offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, SessionIndex, }; use sp_std::prelude::*; @@ -2250,6 +2250,7 @@ fn slash_in_old_span_does_not_deselect() { }], &[Perbill::from_percent(0)], 1, + DisableStrategy::WhenSlashed, ); // the validator doesn't get chilled again @@ -2266,6 +2267,7 @@ fn slash_in_old_span_does_not_deselect() { // NOTE: A 100% slash here would clean up the account, causing de-registration. &[Perbill::from_percent(95)], 1, + DisableStrategy::WhenSlashed, ); // the validator doesn't get chilled again @@ -2562,6 +2564,7 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(10)], 2, + DisableStrategy::WhenSlashed, ); assert_eq!(Balances::free_balance(11), 900); @@ -2588,6 +2591,7 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(30)], 3, + DisableStrategy::WhenSlashed, ); // 11 was not further slashed, but 21 and 101 were. @@ -2609,6 +2613,7 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(20)], 2, + DisableStrategy::WhenSlashed, ); // 11 was further slashed, but 21 and 101 were not. @@ -2744,6 +2749,7 @@ fn remove_deferred() { &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], 1, + DisableStrategy::WhenSlashed, ); // fails if empty @@ -2933,6 +2939,40 @@ fn non_slashable_offence_doesnt_disable_validator() { }); } +#[test] +fn slashing_independent_of_disabling_validator() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + + let now = Staking::active_era().unwrap().index; + + // offence with no slash associated, BUT disabling + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + now, + DisableStrategy::Always, + ); + + // offence that slashes 25% of the bond, BUT not disabling + on_offence_in_era( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + now, + DisableStrategy::Never, + ); + + // the offence for validator 10 was explicitly disabled + assert!(is_disabled(10)); + // whereas validator 20 is explicitly not disabled + assert!(!is_disabled(20)); + }); +} + #[test] fn offence_threshold_triggers_new_era() { ExtBuilder::default() @@ -3595,7 +3635,7 @@ fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); - assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), zero_offence_weight); + assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), zero_offence_weight); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) @@ -3608,7 +3648,7 @@ fn offences_weight_calculated_correctly() { reporters: vec![], } 
).collect(); - assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0), n_offence_unapplied_weight); + assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), n_offence_unapplied_weight); // On Offence with one offenders, Applied let one_offender = [ @@ -3629,7 +3669,7 @@ fn offences_weight_calculated_correctly() { // `reward_cost` * reporters (1) + ::DbWeight::get().reads_writes(2, 2); - assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0), one_offence_unapplied_weight); + assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), one_offence_unapplied_weight); }); } diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index a91cb47c117b6..fdff02d42065e 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -37,6 +37,29 @@ pub type Kind = [u8; 16]; /// so that we can slash it accordingly. pub type OffenceCount = u32; +/// In case of an offence, which conditions get an offending validator disabled. +#[derive( + Clone, + Copy, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + Encode, + Decode, + sp_runtime::RuntimeDebug, + scale_info::TypeInfo, +)] +pub enum DisableStrategy { + /// Independently of slashing, this offence will not disable the offender. + Never, + /// Only disable the offender if it is also slashed. + WhenSlashed, + /// Independently of slashing, this offence will always disable the offender. + Always, +} + /// A trait implemented by an offence report. /// /// This trait assumes that the offence is legitimate and was validated already. @@ -79,6 +102,11 @@ pub trait Offence { /// number. Note that for GRANDPA the round number is reset each epoch. fn time_slot(&self) -> Self::TimeSlot; + /// In which cases this offence needs to disable offenders until the next era starts. + fn disable_strategy(&self) -> DisableStrategy { + DisableStrategy::WhenSlashed + } + /// A slash fraction of the total exposure that should be slashed for this /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. /// @@ -150,12 +178,15 @@ pub trait OnOffenceHandler { /// /// The `session` parameter is the session index of the offence. /// + /// The `disable_strategy` parameter decides if the offenders need to be disabled immediately. + /// /// The receiver might decide to not accept this offence. In this case, the call site is /// responsible for queuing the report and re-submitting again. 
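	// Editor's sketch (not in the original diff): with the extra parameter, a
	// call shaped like the `slashing_independent_of_disabling_validator` test
	// above reads:
	//
	//   T::OnOffenceHandler::on_offence(
	//       &offenders,
	//       &[Perbill::zero()],
	//       session,
	//       DisableStrategy::Always,
	//   );
	//
	// i.e. an offence may now disable without slashing, or slash without
	// disabling (`DisableStrategy::Never`).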
fn on_offence( offenders: &[OffenceDetails], slash_fraction: &[Perbill], session: SessionIndex, + disable_strategy: DisableStrategy, ) -> Res; } @@ -164,6 +195,7 @@ impl OnOffenceHandler _offenders: &[OffenceDetails], _slash_fraction: &[Perbill], _session: SessionIndex, + _disable_strategy: DisableStrategy, ) -> Res { Default::default() } From 4c98bbab7fffc9b79525b31caf0e2f346b26185e Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 18 Nov 2021 00:14:16 -0400 Subject: [PATCH 112/162] Fix Weight Handlebar Template (#10302) * fix template * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_identity --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/identity/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Bot --- .maintain/frame-weight-template.hbs | 68 +++---- frame/identity/src/weights.rs | 191 +++++++++--------- utils/frame/benchmarking-cli/src/template.hbs | 30 +-- 3 files changed, 144 insertions(+), 145 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 045140d54dff7..4f34707b96020 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -22,7 +22,7 @@ //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} // Executed Command: -{{#each args as |arg|~}} +{{#each args as |arg|}} // {{arg}} {{/each}} @@ -35,80 +35,80 @@ use sp_std::marker::PhantomData; /// Weight functions needed for {{pallet}}. pub trait WeightInfo { - {{~#each benchmarks as |benchmark|}} + {{#each benchmarks as |benchmark|}} fn {{benchmark.name~}} ( {{~#each benchmark.components as |c| ~}} {{c.name}}: u32, {{/each~}} ) -> Weight; - {{~/each}} + {{/each}} } /// Weights for {{pallet}} using the Substrate node and recommended hardware. 
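{{!-- Editor's note, not part of the original template: `{{~` / `~}}` are
Handlebars whitespace control and strip the adjacent whitespace, including
newlines and indentation. Dropping the tildes, which is the actual fix of
this patch, lets these standalone `{{#each}}`/`{{#if}}` lines be removed by
Handlebars' own standalone-line handling, so the generated Rust keeps its
blank lines and indentation; compare the regenerated `weights.rs` below. --}}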
pub struct SubstrateWeight(PhantomData); -{{~#if (eq pallet "frame_system")}} +{{#if (eq pallet "frame_system")}} impl WeightInfo for SubstrateWeight { -{{~else}} +{{else}} impl WeightInfo for SubstrateWeight { -{{~/if}} - {{~#each benchmarks as |benchmark|}} - {{~#each benchmark.comments as |comment|}} +{{/if}} + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} // {{comment}} - {{~/each}} + {{/each}} fn {{benchmark.name~}} ( {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} + {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} + {{/each}} + {{#if (ne benchmark.base_reads "0")}} .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} + {{/if}} + {{#each benchmark.component_reads as |cr|}} .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} + {{/each}} + {{#if (ne benchmark.base_writes "0")}} .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} + {{/if}} + {{#each benchmark.component_writes as |cw|}} .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} + {{/each}} } - {{~/each}} + {{/each}} } // For backwards compatibility and tests impl WeightInfo for () { - {{~#each benchmarks as |benchmark|}} - {{~#each benchmark.comments as |comment|}} + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} // {{comment}} - {{~/each}} + {{/each}} fn {{benchmark.name~}} ( {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} + {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} + {{/each}} + {{#if (ne benchmark.base_reads "0")}} .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} + {{/if}} + {{#each benchmark.component_reads as |cr|}} .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} + {{/each}} + {{#if (ne benchmark.base_writes "0")}} .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} + {{/if}} + {{#each benchmark.component_writes as |cw|}} .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} + {{/each}} } - {{~/each}} + {{/each}} } diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 611909f326eab..92bc4cbaae924 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_identity //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-11-17, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -35,7 +35,6 @@ // --output=./frame/identity/src/weights.rs // --template=./.maintain/frame-weight-template.hbs - #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -68,19 +67,19 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:1) fn add_registrar(r: u32, ) -> Weight { - (22_152_000 as Weight) - // Standard Error: 6_000 - .saturating_add((339_000 as Weight).saturating_mul(r as Weight)) + (19_176_000 as Weight) + // Standard Error: 5_000 + .saturating_add((313_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) fn set_identity(r: u32, x: u32, ) -> Weight { - (53_017_000 as Weight) - // Standard Error: 14_000 - .saturating_add((279_000 as Weight).saturating_mul(r as Weight)) + (44_668_000 as Weight) + // Standard Error: 12_000 + .saturating_add((244_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((1_081_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((811_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -88,9 +87,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) fn set_subs_new(s: u32, ) -> Weight { - (44_693_000 as Weight) - // Standard Error: 1_000 - .saturating_add((6_631_000 as Weight).saturating_mul(s as Weight)) + (38_917_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_331_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -100,9 +99,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) fn set_subs_old(p: u32, ) -> Weight { - (42_017_000 as Weight) + (36_057_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_193_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((1_756_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) @@ -111,13 +110,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (50_989_000 as Weight) - // Standard Error: 11_000 - .saturating_add((258_000 as Weight).saturating_mul(r as Weight)) + (44_348_000 as Weight) + // Standard Error: 9_000 + .saturating_add((183_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_724_000 as Weight).saturating_mul(s as Weight)) // Standard Error: 1_000 - .saturating_add((579_000 as 
Weight).saturating_mul(x as Weight)) + .saturating_add((439_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) @@ -125,56 +124,56 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) fn request_judgement(r: u32, x: u32, ) -> Weight { - (55_562_000 as Weight) + (46_592_000 as Weight) // Standard Error: 5_000 - .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_137_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((858_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) fn cancel_request(r: u32, x: u32, ) -> Weight { - (51_744_000 as Weight) + (43_556_000 as Weight) // Standard Error: 6_000 - .saturating_add((192_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((174_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_131_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((850_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_fee(r: u32, ) -> Weight { - (9_472_000 as Weight) - // Standard Error: 3_000 - .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) + (7_971_000 as Weight) + // Standard Error: 4_000 + .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_account_id(r: u32, ) -> Weight { - (9_705_000 as Weight) - // Standard Error: 3_000 - .saturating_add((312_000 as Weight).saturating_mul(r as Weight)) + (8_234_000 as Weight) + // Standard Error: 4_000 + .saturating_add((280_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_fields(r: u32, ) -> Weight { - (9_537_000 as Weight) - // Standard Error: 3_000 - .saturating_add((318_000 as Weight).saturating_mul(r as Weight)) + (8_126_000 as Weight) + // Standard Error: 4_000 + .saturating_add((275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) fn provide_judgement(r: u32, x: u32, ) -> Weight { - (36_298_000 as Weight) + (30_949_000 as Weight) // Standard Error: 5_000 - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((286_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((856_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -183,11 +182,11 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: 
Identity SuperOf (r:0 w:100) fn kill_identity(r: u32, s: u32, _x: u32, ) -> Weight { - (63_238_000 as Weight) - // Standard Error: 10_000 - .saturating_add((246_000 as Weight).saturating_mul(r as Weight)) + (63_792_000 as Weight) + // Standard Error: 11_000 + .saturating_add((242_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_738_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) @@ -196,18 +195,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn add_sub(s: u32, ) -> Weight { - (57_394_000 as Weight) + (48_751_000 as Weight) // Standard Error: 1_000 - .saturating_add((208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((193_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) fn rename_sub(s: u32, ) -> Weight { - (18_274_000 as Weight) + (15_892_000 as Weight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -215,18 +214,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn remove_sub(s: u32, ) -> Weight { - (58_184_000 as Weight) + (49_746_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((181_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn quit_sub(s: u32, ) -> Weight { - (36_304_000 as Weight) + (32_286_000 as Weight) // Standard Error: 1_000 - .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((166_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -236,19 +235,19 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) fn add_registrar(r: u32, ) -> Weight { - (22_152_000 as Weight) - // Standard Error: 6_000 - .saturating_add((339_000 as Weight).saturating_mul(r as Weight)) + (19_176_000 as Weight) + // Standard Error: 5_000 + .saturating_add((313_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) fn set_identity(r: u32, x: u32, ) -> Weight { - (53_017_000 as Weight) - // Standard Error: 14_000 - .saturating_add((279_000 as Weight).saturating_mul(r as Weight)) + (44_668_000 as Weight) + // Standard Error: 12_000 + .saturating_add((244_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((1_081_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((811_000 as Weight).saturating_mul(x as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -256,9 +255,9 @@ impl WeightInfo for () { // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) fn set_subs_new(s: u32, ) -> Weight { - (44_693_000 as Weight) - // Standard Error: 1_000 - .saturating_add((6_631_000 as Weight).saturating_mul(s as Weight)) + (38_917_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_331_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -268,9 +267,9 @@ impl WeightInfo for () { // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) fn set_subs_old(p: u32, ) -> Weight { - (42_017_000 as Weight) + (36_057_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_193_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((1_756_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) @@ -279,13 +278,13 @@ impl WeightInfo for () { // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (50_989_000 as Weight) - // Standard Error: 11_000 - .saturating_add((258_000 as Weight).saturating_mul(r as Weight)) + (44_348_000 as Weight) + // Standard Error: 9_000 + .saturating_add((183_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_724_000 as Weight).saturating_mul(s as Weight)) // Standard Error: 1_000 - .saturating_add((579_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((439_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) @@ -293,56 +292,56 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) fn request_judgement(r: u32, x: u32, ) -> Weight { - (55_562_000 as Weight) + (46_592_000 as Weight) // Standard Error: 5_000 - .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_137_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((858_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity IdentityOf (r:1 w:1) fn cancel_request(r: u32, x: u32, ) -> Weight { - (51_744_000 as Weight) + (43_556_000 as Weight) // Standard Error: 6_000 - .saturating_add((192_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((174_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_131_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((850_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_fee(r: u32, ) -> 
Weight { - (9_472_000 as Weight) - // Standard Error: 3_000 - .saturating_add((321_000 as Weight).saturating_mul(r as Weight)) + (7_971_000 as Weight) + // Standard Error: 4_000 + .saturating_add((283_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_account_id(r: u32, ) -> Weight { - (9_705_000 as Weight) - // Standard Error: 3_000 - .saturating_add((312_000 as Weight).saturating_mul(r as Weight)) + (8_234_000 as Weight) + // Standard Error: 4_000 + .saturating_add((280_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:1) fn set_fields(r: u32, ) -> Weight { - (9_537_000 as Weight) - // Standard Error: 3_000 - .saturating_add((318_000 as Weight).saturating_mul(r as Weight)) + (8_126_000 as Weight) + // Standard Error: 4_000 + .saturating_add((275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) fn provide_judgement(r: u32, x: u32, ) -> Weight { - (36_298_000 as Weight) + (30_949_000 as Weight) // Standard Error: 5_000 - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((286_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 0 - .saturating_add((1_141_000 as Weight).saturating_mul(x as Weight)) + .saturating_add((856_000 as Weight).saturating_mul(x as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -351,11 +350,11 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) fn kill_identity(r: u32, s: u32, _x: u32, ) -> Weight { - (63_238_000 as Weight) - // Standard Error: 10_000 - .saturating_add((246_000 as Weight).saturating_mul(r as Weight)) + (63_792_000 as Weight) + // Standard Error: 11_000 + .saturating_add((242_000 as Weight).saturating_mul(r as Weight)) // Standard Error: 1_000 - .saturating_add((2_184_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((1_738_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) @@ -364,18 +363,18 @@ impl WeightInfo for () { // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn add_sub(s: u32, ) -> Weight { - (57_394_000 as Weight) + (48_751_000 as Weight) // Standard Error: 1_000 - .saturating_add((208_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((193_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) fn rename_sub(s: u32, ) -> Weight { - (18_274_000 as Weight) + (15_892_000 as Weight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } 
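Editor's note (not part of the patch): every generated function in this file follows the same shape, a constant base weight, one slope per benchmarked component, and flat DB read/write charges. Below is a hedged, self-contained sketch evaluating the regenerated `set_fee` formula above; the `READ`/`WRITE` constants stand in for `RocksDbWeight::get().reads(1)`/`.writes(1)` and their concrete values here are illustrative assumptions, not taken from this diff.

```rust
type Weight = u64;

// Assumed placeholder DB costs; the real values come from the runtime's
// `RuntimeDbWeight` constants.
const READ: Weight = 25_000_000;
const WRITE: Weight = 100_000_000;

// Regenerated `set_fee(r)` from above: base + slope * r + 1 read + 1 write.
fn set_fee_weight(r: u32) -> Weight {
    (7_971_000 as Weight)
        .saturating_add((283_000 as Weight).saturating_mul(r as Weight))
        .saturating_add(READ)
        .saturating_add(WRITE)
}

fn main() {
    // With 20 registrars: 7_971_000 + 20 * 283_000 = 13_631_000, plus DB charges.
    assert_eq!(set_fee_weight(20), 13_631_000 + READ + WRITE);
}
```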
@@ -383,18 +382,18 @@ impl WeightInfo for () { // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn remove_sub(s: u32, ) -> Weight { - (58_184_000 as Weight) + (49_746_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((181_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) fn quit_sub(s: u32, ) -> Weight { - (36_304_000 as Weight) + (32_286_000 as Weight) // Standard Error: 1_000 - .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((166_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index 36abf27f59a6e..ea734e165919a 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -6,7 +6,7 @@ //! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}, CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} // Executed Command: -{{#each args as |arg|~}} +{{#each args as |arg|}} // {{arg}} {{/each}} @@ -20,32 +20,32 @@ use sp_std::marker::PhantomData; /// Weight functions for `{{pallet}}`. pub struct WeightInfo(PhantomData); impl {{pallet}}::WeightInfo for WeightInfo { - {{~#each benchmarks as |benchmark|}} - {{~#each benchmark.comments as |comment|}} + {{#each benchmarks as |benchmark|}} + {{#each benchmark.comments as |comment|}} // {{comment}} - {{~/each}} + {{/each}} fn {{benchmark.name~}} ( {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} + {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} + {{/each}} + {{#if (ne benchmark.base_reads "0")}} .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} + {{/if}} + {{#each benchmark.component_reads as |cr|}} .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} + {{/each}} + {{#if (ne benchmark.base_writes "0")}} .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} + {{/if}} + {{#each benchmark.component_writes as |cw|}} .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} + {{/each}} } - {{~/each}} + {{/each}} } From 0214f26b5fb6afb89b5322aa59e18b1d19e88a54 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 18 Nov 2021 12:05:48 +0300 Subject: [PATCH 113/162] Set current dir at check_wasm_toolchain_installed at wasm-builder (#10284) * Set current dir at check_wasm_toolchain_installed * Add comments --- utils/wasm-builder/src/prerequisites.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 
7236b8169bcb5..88b1073a2951f 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -120,6 +120,9 @@ fn check_wasm_toolchain_installed( let manifest_path = temp.path().join("Cargo.toml").display().to_string(); let mut build_cmd = cargo_command.command(); + // Chdir to temp to avoid including project's .cargo/config.toml + // by accident - it can happen in some CI environments. + build_cmd.current_dir(&temp); build_cmd.args(&[ "build", "--target=wasm32-unknown-unknown", @@ -132,6 +135,9 @@ fn check_wasm_toolchain_installed( } let mut run_cmd = cargo_command.command(); + // Chdir to temp to avoid including project's .cargo/config.toml + // by accident - it can happen in some CI environments. + run_cmd.current_dir(&temp); run_cmd.args(&["run", "--manifest-path", &manifest_path]); // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock From e670c040d42132da982e156e25a385df213354ed Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 18 Nov 2021 20:16:38 +0900 Subject: [PATCH 114/162] Clear WASM linear memory on other OSes besides Linux too (#10291) --- client/executor/src/integration_tests/mod.rs | 78 +++++++++++++++++++ .../executor/wasmtime/src/instance_wrapper.rs | 16 ++-- client/executor/wasmtime/src/runtime.rs | 8 +- 3 files changed, 91 insertions(+), 11 deletions(-) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 7aa02a61dba11..01c040687ddd9 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -699,3 +699,81 @@ fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecu assert!(format!("{}", error_result).contains("Spawned task")); } + +test_wasm_execution!(memory_is_cleared_between_invocations); +fn memory_is_cleared_between_invocations(wasm_method: WasmExecutionMethod) { + // This is based on the code generated by compiling a runtime *without* + // the `-C link-arg=--import-memory` using the following code and then + // disassembling the resulting blob with `wasm-dis`: + // + // ``` + // #[no_mangle] + // #[cfg(not(feature = "std"))] + // pub fn returns_no_bss_mutable_static(_: *mut u8, _: usize) -> u64 { + // static mut COUNTER: usize = 0; + // let output = unsafe { + // COUNTER += 1; + // COUNTER as u64 + // }; + // sp_core::to_substrate_wasm_fn_return_value(&output) + // } + // ``` + // + // This results in the BSS section to *not* be emitted, hence the executor has no way + // of knowing about the `static` variable's existence, so this test will fail if the linear + // memory is not properly cleared between invocations. 
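	// Editor's note (not part of the patch): concretely, `COUNTER` lives in
	// linear memory at offset 1048576 in the module below. The fast
	// instance-reuse path only reapplies data segments and restores globals,
	// so if the executor did not also zero the linear memory, the second
	// `call_export` at the bottom of this test would observe 2 instead of 1.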
+	let binary = wat::parse_str(r#"
+	(module
+		(type $i32_=>_i32 (func (param i32) (result i32)))
+		(type $i32_i32_=>_i64 (func (param i32 i32) (result i64)))
+		(import "env" "ext_allocator_malloc_version_1" (func $ext_allocator_malloc_version_1 (param i32) (result i32)))
+		(global $__stack_pointer (mut i32) (i32.const 1048576))
+		(global $global$1 i32 (i32.const 1048580))
+		(global $global$2 i32 (i32.const 1048592))
+		(memory $0 17)
+		(export "memory" (memory $0))
+		(export "returns_no_bss_mutable_static" (func $returns_no_bss_mutable_static))
+		(export "__data_end" (global $global$1))
+		(export "__heap_base" (global $global$2))
+		(func $returns_no_bss_mutable_static (param $0 i32) (param $1 i32) (result i64)
+			(local $2 i32)
+			(local $3 i32)
+			(i32.store offset=1048576
+				(i32.const 0)
+				(local.tee $2
+					(i32.add
+						(i32.load offset=1048576 (i32.const 0))
+						(i32.const 1)
+					)
+				)
+			)
+			(i64.store
+				(local.tee $3
+					(call $ext_allocator_malloc_version_1 (i32.const 8))
+				)
+				(i64.extend_i32_u (local.get $2))
+			)
+			(i64.or
+				(i64.extend_i32_u (local.get $3))
+				(i64.const 34359738368)
+			)
+		)
+	)"#).unwrap();
+
+	let runtime = crate::wasm_runtime::create_wasm_runtime_with_code(
+		wasm_method,
+		1024,
+		RuntimeBlob::uncompress_if_needed(&binary[..]).unwrap(),
+		HostFunctions::host_functions(),
+		true,
+		None,
+	)
+	.unwrap();
+
+	let mut instance = runtime.new_instance().unwrap();
+	let res = instance.call_export("returns_no_bss_mutable_static", &[0]).unwrap();
+	assert_eq!(1, u64::decode(&mut &res[..]).unwrap());
+
+	let res = instance.call_export("returns_no_bss_mutable_static", &[0]).unwrap();
+	assert_eq!(1, u64::decode(&mut &res[..]).unwrap());
+}
diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs
index 2b8508ee2b07f..1d40563d0a9ff 100644
--- a/client/executor/wasmtime/src/instance_wrapper.rs
+++ b/client/executor/wasmtime/src/instance_wrapper.rs
@@ -403,10 +403,10 @@ impl InstanceWrapper {
 		self.memory.data_ptr(ctx)
 	}
 
-	/// Removes physical backing from the allocated linear memory. This leads to returning the
-	/// memory back to the system. While the memory is zeroed this is considered as a side-effect
-	/// and is not relied upon. Thus this function acts as a hint.
-	pub fn decommit(&self, ctx: impl AsContext) {
+	/// If possible removes physical backing from the allocated linear memory which
+	/// leads to returning the memory back to the system; this also zeroes the memory
+	/// as a side-effect.
+	pub fn decommit(&self, mut ctx: impl AsContextMut) {
 		if self.memory.data_size(&ctx) == 0 {
 			return
 		}
@@ -417,7 +417,7 @@ impl InstanceWrapper {
 			unsafe {
 				let ptr = self.memory.data_ptr(&ctx);
-				let len = self.memory.data_size(ctx);
+				let len = self.memory.data_size(&ctx);
 
 				// Linux handles MADV_DONTNEED reliably. The result is that the given area
 				// is unmapped and will be zeroed on the next pagefault.
@@ -429,9 +429,15 @@ impl InstanceWrapper {
 						std::io::Error::last_os_error(),
 					);
 				});
+				} else {
+					return;
 				}
 			}
 		}
 	}
+
+		// If we're on an unsupported OS or the memory couldn't have been
+		// decommitted for some reason, then just manually zero it out.
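		// (Editor's note, not part of the patch: on Linux the successful
		// `madvise(MADV_DONTNEED)` branch above returns early, so the
		// `fill(0)` added below is the portable fallback for other OSes,
		// or for a failed `madvise`, keeping the guarantee that a reused
		// instance starts from zeroed linear memory.)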
+		self.memory.data_mut(ctx.as_context_mut()).fill(0);
+	}
 }
diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs
index bd113c3383838..4d107862173b0 100644
--- a/client/executor/wasmtime/src/runtime.rs
+++ b/client/executor/wasmtime/src/runtime.rs
@@ -237,7 +237,7 @@ impl WasmInstance for WasmtimeInstance {
 
 				// Signal to the OS that we are done with the linear memory and that it can be
 				// reclaimed.
-				instance_wrapper.decommit(&store);
+				instance_wrapper.decommit(store);
 
 				result
 			},
@@ -415,11 +415,7 @@ pub struct Semantics {
 	///
 	/// Primarily this is achieved by not recreating the instance for each call and performing a
 	/// bare minimum clean up: reapplying the data segments and restoring the values for global
-	/// variables. The vast majority of the linear memory is not restored, meaning that effects
-	/// of previous executions on the same [`WasmInstance`] can be observed there.
-	///
-	/// This is not a problem for a standard substrate runtime execution because it's up to the
-	/// runtime itself to make sure that it doesn't involve any non-determinism.
+	/// variables.
 	///
 	/// Since this feature depends on instrumentation, it can be set only if runtime is
 	/// instantiated using the runtime blob, e.g. using [`create_runtime`].

From 80bc4a0c79f840e36059eae5427045086b7b8d9a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Thu, 18 Nov 2021 21:12:08 +0100
Subject: [PATCH 115/162] Make authorship soft deadline configurable. (#10125)

* Make soft deadline configurable.

* cargo +nightly fmt --all

* Move setter where it belongs.
---
 .../basic-authorship/src/basic_authorship.rs  | 37 ++++++++++++++++++-
 1 file changed, 35 insertions(+), 2 deletions(-)

diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs
index 4c7f6c856ec86..0fd3932807a17 100644
--- a/client/basic-authorship/src/basic_authorship.rs
+++ b/client/basic-authorship/src/basic_authorship.rs
@@ -42,7 +42,7 @@ use sp_inherents::InherentData;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
-	Digest,
+	Digest, Percent, SaturatedConversion,
 };
 use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
 
@@ -58,6 +58,8 @@ use sc_proposer_metrics::MetricsLink as PrometheusMetrics;
 /// transferred to other nodes.
 pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512;
 
+const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50);
+
 /// [`Proposer`] factory.
 pub struct ProposerFactory<A, B, C, PR> {
 	spawn_handle: Box<dyn SpawnNamed>,
@@ -72,6 +74,14 @@ pub struct ProposerFactory<A, B, C, PR> {
 	/// If no `block_size_limit` is passed to [`sp_consensus::Proposer::propose`], this block size
 	/// limit will be used.
 	default_block_size_limit: usize,
+	/// Soft deadline percentage of hard deadline.
+	///
+	/// The value is used to compute soft deadline during block production.
+	/// The soft deadline indicates where we should stop attempting to add transactions
+	/// to the block, which exhaust resources. After soft deadline is reached,
+	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
+	/// transactions which exhaust resources, we will conclude that the block is full.
+	soft_deadline_percent: Percent,
 	telemetry: Option<TelemetryHandle>,
 	/// When estimating the block size, should the proof be included?
 	include_proof_in_block_size_estimation: bool,
@@ -96,6 +106,7 @@ impl ProposerFactory {
 			transaction_pool,
 			metrics: PrometheusMetrics::new(prometheus),
 			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
+			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
 			telemetry,
 			client,
 			include_proof_in_block_size_estimation: false,
@@ -124,6 +135,7 @@ impl ProposerFactory {
 			transaction_pool,
 			metrics: PrometheusMetrics::new(prometheus),
 			default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT,
+			soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT,
 			telemetry,
 			include_proof_in_block_size_estimation: true,
 			_phantom: PhantomData,
@@ -147,6 +159,22 @@ impl ProposerFactory {
 	pub fn set_default_block_size_limit(&mut self, limit: usize) {
 		self.default_block_size_limit = limit;
 	}
+
+	/// Set soft deadline percentage.
+	///
+	/// The value is used to compute soft deadline during block production.
+	/// The soft deadline indicates where we should stop attempting to add transactions
+	/// to the block, which exhaust resources. After soft deadline is reached,
+	/// we switch to a fixed-amount mode, in which after we see `MAX_SKIPPED_TRANSACTIONS`
+	/// transactions which exhaust resources, we will conclude that the block is full.
+	///
+	/// Setting the value too low will significantly limit the amount of transactions
+	/// we try in case they exhaust resources. Setting the value too high can
+	/// potentially open a DoS vector, where many "exhaust resources" transactions
+	/// are being tried with no success, hence block producer ends up creating an empty block.
+	pub fn set_soft_deadline(&mut self, percent: Percent) {
+		self.soft_deadline_percent = percent;
+	}
 }
 
 impl ProposerFactory
 where
@@ -184,6 +212,7 @@ where
 			now,
 			metrics: self.metrics.clone(),
 			default_block_size_limit: self.default_block_size_limit,
+			soft_deadline_percent: self.soft_deadline_percent,
 			telemetry: self.telemetry.clone(),
 			_phantom: PhantomData,
 			include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation,
@@ -229,6 +258,7 @@ pub struct Proposer {
 	metrics: PrometheusMetrics,
 	default_block_size_limit: usize,
 	include_proof_in_block_size_estimation: bool,
+	soft_deadline_percent: Percent,
 	telemetry: Option<TelemetryHandle>,
 	_phantom: PhantomData<(B, PR)>,
 }
@@ -340,7 +370,10 @@ where
 
 		// proceed with transactions
 		// We calculate soft deadline used only in case we start skipping transactions.
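		// Editor's note (not part of the patch): with the new
		// `soft_deadline_percent`, 50% by default, and e.g. 2 seconds left
		// until the hard deadline, the soft deadline computed below lands
		// 1 second from now. Once it has passed, a handful of
		// resource-exhausting transactions (`MAX_SKIPPED_TRANSACTIONS`) is
		// enough to conclude the block is full instead of scanning the
		// whole remaining pool.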
let now = (self.now)(); - let soft_deadline = now + deadline.saturating_duration_since(now) / 2; + let left = deadline.saturating_duration_since(now); + let left_micros: u64 = left.as_micros().saturated_into(); + let soft_deadline = + now + time::Duration::from_micros(self.soft_deadline_percent.mul_floor(left_micros)); let block_timer = time::Instant::now(); let mut skipped = 0; let mut unqueue_invalid = Vec::new(); From 3d4f1866ebb3cc73cceb12548b15775cedd3e475 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Fri, 19 Nov 2021 06:08:17 +0800 Subject: [PATCH 116/162] Migrate all doc to new pallet macro (#10187) * Migrate all doc to new pallet macro Signed-off-by: koushiro * Fix indent Signed-off-by: koushiro * Fix format Signed-off-by: koushiro --- bin/node-template/pallets/template/src/lib.rs | 2 +- frame/assets/README.md | 50 +++++++++++-------- frame/assets/src/lib.rs | 21 ++++---- frame/assets/src/mock.rs | 2 +- frame/collective/src/tests.rs | 3 +- frame/democracy/src/lib.rs | 13 ++--- frame/im-online/README.md | 34 ++++++++----- frame/im-online/src/lib.rs | 39 ++++++++------- frame/lottery/src/lib.rs | 4 +- frame/nicks/src/lib.rs | 8 +-- frame/node-authorization/src/lib.rs | 2 +- frame/randomness-collective-flip/README.md | 34 ++++++++----- frame/randomness-collective-flip/src/lib.rs | 8 +-- frame/recovery/src/lib.rs | 4 +- frame/scored-pool/README.md | 41 ++++++++------- frame/scored-pool/src/lib.rs | 27 ++++++---- frame/staking/README.md | 24 ++++++--- frame/staking/src/lib.rs | 22 +++++--- frame/sudo/README.md | 25 ++++++---- frame/sudo/src/lib.rs | 15 +++--- frame/system/README.md | 37 ++++++++------ frame/timestamp/README.md | 37 ++++++++------ frame/timestamp/src/lib.rs | 26 ++++++---- 23 files changed, 276 insertions(+), 202 deletions(-) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 18599168f1a63..f5ce8c5a0f7fd 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -16,7 +16,7 @@ mod benchmarking; #[frame_support::pallet] pub mod pallet { - use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; + use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; /// Configure the pallet by specifying the parameters and types on which it depends. diff --git a/frame/assets/README.md b/frame/assets/README.md index a99b60fa33d56..aae5244953e50 100644 --- a/frame/assets/README.md +++ b/frame/assets/README.md @@ -69,35 +69,43 @@ Import the Assets module and types and derive your runtime's configuration trait ```rust use pallet_assets as assets; -use frame_support::{decl_module, dispatch, ensure}; -use frame_system::ensure_signed; use sp_runtime::ArithmeticError; -pub trait Config: assets::Config { } +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - pub fn issue_token_airdrop(origin) -> dispatch::DispatchResult { - let sender = ensure_signed(origin).map_err(|e| e.as_str())?; + #[pallet::pallet] + pub struct Pallet(_); - const ACCOUNT_ALICE: u64 = 1; - const ACCOUNT_BOB: u64 = 2; - const COUNT_AIRDROP_RECIPIENTS: u64 = 2; - const TOKENS_FIXED_SUPPLY: u64 = 100; + #[pallet::config] + pub trait Config: frame_system::Config + assets::Config {} - ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), ArithmeticError::DivisionByZero); + #[pallet::call] + impl Pallet { + pub fn issue_token_airdrop(origin: OriginFor) -> DispatchResult { + let sender = ensure_signed(origin)?; - let asset_id = Self::next_asset_id(); + const ACCOUNT_ALICE: u64 = 1; + const ACCOUNT_BOB: u64 = 2; + const COUNT_AIRDROP_RECIPIENTS: u64 = 2; + const TOKENS_FIXED_SUPPLY: u64 = 100; - >::mutate(|asset_id| *asset_id += 1); - >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); - >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); - >::insert(asset_id, TOKENS_FIXED_SUPPLY); + ensure!(!COUNT_AIRDROP_RECIPIENTS.is_zero(), ArithmeticError::DivisionByZero); - Self::deposit_event(RawEvent::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); - Ok(()) - } - } + let asset_id = Self::next_asset_id(); + + >::mutate(|asset_id| *asset_id += 1); + >::insert((asset_id, &ACCOUNT_ALICE), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); + >::insert((asset_id, &ACCOUNT_BOB), TOKENS_FIXED_SUPPLY / COUNT_AIRDROP_RECIPIENTS); + >::insert(asset_id, TOKENS_FIXED_SUPPLY); + + Self::deposit_event(Event::Issued(asset_id, sender, TOKENS_FIXED_SUPPLY)); + Ok(()) + } + } } ``` diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index f2bc86843ad9c..940120954f968 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -140,6 +140,15 @@ mod types; pub use types::*; use codec::HasCompact; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{ + AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero, + }, + ArithmeticError, TokenError, +}; +use sp_std::{borrow::Borrow, convert::TryInto, prelude::*}; + use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, @@ -151,16 +160,6 @@ use frame_support::{ }, }; use frame_system::Config as SystemConfig; -use sp_runtime::{ - traits::{ - AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero, - }, - ArithmeticError, TokenError, -}; -use sp_std::{borrow::Borrow, prelude::*}; - -#[cfg(feature = "std")] -use frame_support::traits::GenesisBuild; pub use pallet::*; pub use weights::WeightInfo; @@ -168,7 +167,7 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; + use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 1e1ea8ba9a961..aedf437ee8439 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -20,7 +20,7 @@ use super::*; use crate as pallet_assets; -use frame_support::{construct_runtime, parameter_types}; +use frame_support::{construct_runtime, parameter_types, traits::GenesisBuild}; use sp_core::H256; use sp_runtime::{ testing::Header, diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 7e52b10a9b1d6..cbd2f68ac73e8 100644 --- a/frame/collective/src/tests.rs +++ 
b/frame/collective/src/tests.rs @@ -52,9 +52,8 @@ mod mock_democracy { pub use pallet::*; #[frame_support::pallet] pub mod pallet { - use frame_support::{pallet_prelude::*, traits::EnsureOrigin}; + use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use sp_runtime::DispatchResult; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 529bcebc8e374..ec706ca8f48aa 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -240,16 +240,9 @@ enum Releases { #[frame_support::pallet] pub mod pallet { - use super::*; - use frame_support::{ - dispatch::DispatchResultWithPostInfo, - pallet_prelude::*, - traits::EnsureOrigin, - weights::{DispatchClass, Pays}, - Parameter, - }; - use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - use sp_runtime::DispatchResult; + use super::{DispatchResult, *}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/im-online/README.md b/frame/im-online/README.md index 46b2268f18b12..be11e0c49dff3 100644 --- a/frame/im-online/README.md +++ b/frame/im-online/README.md @@ -26,21 +26,29 @@ It is submitted as an Unsigned Transaction via off-chain workers. ## Usage ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::ensure_signed; use pallet_im_online::{self as im_online}; -pub trait Config: im_online::Config {} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { - let _sender = ensure_signed(origin)?; - let _is_online = >::is_online(authority_index); - Ok(()) - } - } +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + im_online::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn is_online(origin: OriginFor, authority_index: u32) -> DispatchResult { + let _sender = ensure_signed(origin)?; + let _is_online = >::is_online(authority_index); + Ok(()) + } + } } ``` diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 718c735fdad41..dbae5ed96d58a 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -17,12 +17,12 @@ //! # I'm online Pallet //! -//! If the local node is a validator (i.e. contains an authority key), this module +//! If the local node is a validator (i.e. contains an authority key), this pallet //! gossips a heartbeat transaction with each new session. The heartbeat functions //! as a simple mechanism to signal that the node is online in the current era. //! //! Received heartbeats are tracked for one era and reset with each new era. The -//! module exposes two public functions to query if a heartbeat has been received +//! pallet exposes two public functions to query if a heartbeat has been received //! in the current era or session. //! //! The heartbeat is a signed transaction, which was signed using the session key @@ -43,16 +43,24 @@ //! ## Usage //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_signed; //! use pallet_im_online::{self as im_online}; //! -//! pub trait Config: im_online::Config {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! 
use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + im_online::Config {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn is_online(origin: OriginFor, authority_index: u32) -> DispatchResult { //! let _sender = ensure_signed(origin)?; //! let _is_online = >::is_online(authority_index); //! Ok(()) @@ -64,7 +72,7 @@ //! //! ## Dependencies //! -//! This module depends on the [Session module](../pallet_session/index.html). +//! This pallet depends on the [Session pallet](../pallet_session/index.html). // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -302,15 +310,8 @@ type OffchainResult = Result::B #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::Get, Parameter}; - use frame_system::{ensure_none, pallet_prelude::*}; - use sp_runtime::{ - traits::{MaybeSerializeDeserialize, Member}, - transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, - ValidTransaction, - }, - }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index c1c536b8ba290..b30bc1dc523a6 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -115,8 +115,8 @@ impl ValidateCall for Pallet { #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight, Parameter}; - use frame_system::{ensure_signed, pallet_prelude::*}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 438929576269c..bfc23187fc5b5 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -52,12 +52,8 @@ type NegativeImbalanceOf = <::Currency as Currency< #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - ensure, - pallet_prelude::*, - traits::{EnsureOrigin, Get}, - }; - use frame_system::{ensure_signed, pallet_prelude::*}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config { diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 6e3ec58ba63f9..17b74053e6684 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -52,7 +52,7 @@ pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; + use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] diff --git a/frame/randomness-collective-flip/README.md b/frame/randomness-collective-flip/README.md index 9885c734d9fad..0730d4abf7cf2 100644 --- a/frame/randomness-collective-flip/README.md +++ b/frame/randomness-collective-flip/README.md @@ -20,18 +20,28 @@ the system trait. 
### Example - Get random seed for the current block ```rust -use frame_support::{decl_module, dispatch, traits::Randomness}; - -pub trait Config: frame_system::Config {} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn random_module_example(origin) -> dispatch::DispatchResult { - let _random_value = >::random(&b"my context"[..]); - Ok(()) - } - } +use frame_support::traits::Randomness; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_randomness_collective_flip::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn random_module_example(origin: OriginFor) -> DispatchResult { + let _random_value = >::random(&b"my context"[..]); + Ok(()) + } + } } ``` diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index a9abb2c9564df..345b8072c5e47 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Randomness Module +//! # Randomness Pallet //! -//! The Randomness Collective Flip module provides a [`random`](./struct.Module.html#method.random) +//! The Randomness Collective Flip pallet provides a [`random`](./struct.Module.html#method.random) //! function that generates low-influence random values based on the block hashes from the previous //! `81` blocks. Low-influence randomness can be useful when defending against relatively weak //! adversaries. Using this pallet as a randomness source is advisable primarily in low-security @@ -31,7 +31,7 @@ //! //! ### Prerequisites //! -//! Import the Randomness Collective Flip module and derive your module's configuration trait from +//! Import the Randomness Collective Flip pallet and derive your pallet's configuration trait from //! the system trait. //! //! ### Example - Get random seed for the current block @@ -41,9 +41,9 @@ //! //! #[frame_support::pallet] //! pub mod pallet { +//! use super::*; //! use frame_support::pallet_prelude::*; //! use frame_system::pallet_prelude::*; -//! use super::*; //! //! #[pallet::pallet] //! #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 797581788077b..522d7008017f3 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -206,8 +206,8 @@ pub struct RecoveryConfig { #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ensure, pallet_prelude::*, traits::Get, Parameter}; - use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; use sp_runtime::ArithmeticError; #[pallet::pallet] diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index bf20124edf52e..56c6af916ecd0 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -37,26 +37,33 @@ by the next highest scoring candidate in the pool, if available. ## Usage ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::ensure_signed; use pallet_scored_pool::{self as scored_pool}; -pub trait Config: scored_pool::Config {} - -decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn candidate(origin) -> dispatch::DispatchResult { - let who = ensure_signed(origin)?; - - let _ = >::submit_candidacy( - T::Origin::from(Some(who.clone()).into()) - ); - Ok(()) - } - } +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + scored_pool::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn candidate(origin: OriginFor) -> DispatchResult { + let who = ensure_signed(origin)?; + + let _ = >::submit_candidacy( + T::Origin::from(Some(who.clone()).into()) + ); + Ok(()) + } + } } - ``` ## Dependencies diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index a5cdb6274f995..7c90d163dee1f 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -54,16 +54,24 @@ //! ## Usage //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_signed; //! use pallet_scored_pool::{self as scored_pool}; //! -//! pub trait Config: scored_pool::Config {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn candidate(origin) -> dispatch::DispatchResult { +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + scored_pool::Config {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn candidate(origin: OriginFor) -> DispatchResult { //! let who = ensure_signed(origin)?; //! //! let _ = >::submit_candidacy( @@ -116,9 +124,8 @@ enum ChangeReceiver { #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight}; - use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; - use sp_runtime::traits::MaybeSerializeDeserialize; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/staking/README.md b/frame/staking/README.md index 072353b1a586c..bbd5bd18f6e81 100644 --- a/frame/staking/README.md +++ b/frame/staking/README.md @@ -133,19 +133,27 @@ The Staking module contains many public storage items and (im)mutable functions. ### Example: Rewarding a validator by id. ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::ensure_signed; use pallet_staking::{self as staking}; -pub trait Config: staking::Config {} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -decl_module! { - pub struct Module for enum Call where origin: T::Origin { + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + staking::Config {} + + #[pallet::call] + impl Pallet { /// Reward a validator. 
- #[weight = 0] - pub fn reward_myself(origin) -> dispatch::DispatchResult { + #[pallet::weight(0)] + pub fn reward_myself(origin: OriginFor) -> DispatchResult { let reported = ensure_signed(origin)?; - >::reward_by_ids(vec![(reported, 10)]); + >::reward_by_ids(vec![(reported, 10)]); Ok(()) } } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index be02e8d91d326..cb91abe476cba 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -157,17 +157,25 @@ //! ### Example: Rewarding a validator by id. //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_signed; //! use pallet_staking::{self as staking}; //! -//! pub trait Config: staking::Config {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + staking::Config {} +//! +//! #[pallet::call] +//! impl Pallet { //! /// Reward a validator. -//! #[weight = 0] -//! pub fn reward_myself(origin) -> dispatch::DispatchResult { +//! #[pallet::weight(0)] +//! pub fn reward_myself(origin: OriginFor) -> DispatchResult { //! let reported = ensure_signed(origin)?; //! >::reward_by_ids(vec![(reported, 10)]); //! Ok(()) diff --git a/frame/sudo/README.md b/frame/sudo/README.md index 60090db46a4fc..e8f688091e326 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -35,15 +35,22 @@ Learn more about privileged functions and `Root` origin in the [`Origin`] type d This is an example of a module that exposes a privileged function: ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::ensure_root; - -pub trait Config: frame_system::Config {} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn privileged_function(origin) -> dispatch::DispatchResult { +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn privileged_function(origin: OriginFor) -> DispatchResult { ensure_root(origin)?; // do something... diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 5f8e6fc0cc13a..de3b3439bc344 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -52,28 +52,27 @@ //! This is an example of a pallet that exposes a privileged function: //! //! ``` -//! //! #[frame_support::pallet] -//! pub mod logger { +//! pub mod pallet { +//! use super::*; //! use frame_support::pallet_prelude::*; //! use frame_system::pallet_prelude::*; -//! use super::*; +//! +//! #[pallet::pallet] +//! pub struct Pallet(_); //! //! #[pallet::config] //! pub trait Config: frame_system::Config {} //! -//! #[pallet::pallet] -//! pub struct Pallet(PhantomData); -//! //! #[pallet::call] //! impl Pallet { //! #[pallet::weight(0)] -//! pub fn privileged_function(origin: OriginFor) -> DispatchResultWithPostInfo { +//! pub fn privileged_function(origin: OriginFor) -> DispatchResult { //! ensure_root(origin)?; //! //! // do something... //! -//! Ok(().into()) +//! Ok(()) //! } //! } //! 
} diff --git a/frame/system/README.md b/frame/system/README.md index 6766c3d73f4de..c22b41e42d798 100644 --- a/frame/system/README.md +++ b/frame/system/README.md @@ -54,21 +54,28 @@ Import the System module and derive your module's configuration trait from the s ### Example - Get extrinsic count and parent hash for the current block ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::{self as system, ensure_signed}; - -pub trait Config: system::Config {} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn system_module_example(origin) -> dispatch::DispatchResult { - let _sender = ensure_signed(origin)?; - let _extrinsic_count = >::extrinsic_count(); - let _parent_hash = >::parent_hash(); - Ok(()) - } - } +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn system_module_example(origin: OriginFor) -> DispatchResult { + let _sender = ensure_signed(origin)?; + let _extrinsic_count = >::extrinsic_count(); + let _parent_hash = >::parent_hash(); + Ok(()) + } + } } ``` diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index 5f8388b04f829..1546377ee6743 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -45,20 +45,29 @@ trait from the timestamp trait. ### Get current timestamp ```rust -use frame_support::{decl_module, dispatch}; -use frame_system::ensure_signed; - -pub trait Config: timestamp::Config {} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 0] - pub fn get_time(origin) -> dispatch::DispatchResult { - let _sender = ensure_signed(origin)?; - let _now = >::get(); - Ok(()) - } - } +use pallet_timestamp::{self as timestamp}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + timestamp::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn get_time(origin: OriginFor) -> DispatchResult { + let _sender = ensure_signed(origin)?; + let _now = >::get(); + Ok(()) + } + } } ``` diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 153606bedbacf..eeb840715f817 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -64,18 +64,26 @@ //! ### Get current timestamp //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! # use pallet_timestamp as timestamp; -//! use frame_system::ensure_signed; +//! use pallet_timestamp::{self as timestamp}; //! -//! pub trait Config: timestamp::Config {} +//! #[frame_support::pallet] +//! pub mod pallet { +//! use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn get_time(origin) -> dispatch::DispatchResult { +//! #[pallet::pallet] +//! pub struct Pallet(_); +//! +//! #[pallet::config] +//! pub trait Config: frame_system::Config + timestamp::Config {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn get_time(origin: OriginFor) -> DispatchResult { //! let _sender = ensure_signed(origin)?; -//! 
let _now = >::get(); +//! let _now = >::get(); //! Ok(()) //! } //! } From 4581dd99ed59f65ede46a34c81133177d5824a68 Mon Sep 17 00:00:00 2001 From: Georges Date: Fri, 19 Nov 2021 09:29:12 +0000 Subject: [PATCH 117/162] Moving `pallet-asset-tx-payment` from cumulus to substrate (#10127) * Moving `pallet-asset-tx-payment` from cumulus * move pallet-asset-tx-payment into transaction payment directory * cargo +nightly fmt * Adding `pallet-asset-tx-payment` to node runtime I had to change the Balance type to u128. Also harmonised that pallet's version * Updating cargo.lock after merge * forgot this * Adding tx-payment signature * Missed one more * `transaction-payment` replaced in`SignedExtension` by `asset-tx-payment` and not added * Fixing benches * add test to verify that we don't charge on post-dispatch if we didn't on pre-dispatch * add (failing) test for asset tx payment of unsigned extrinsics * fix test by removing debug_assert * cargo +nightly fmt * typo in `Cargo.lock` * Object defined twice in lock file * cargo update * remove todo * Apply formatting suggestions from code review Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Refactoring `post_dispatch` of `asset-tx-payment` to reuse `post_dispatch` of `transaction-payment` if the fee asset is native Removing unneeded imports. * Removing redundant `TODO` * Reverting an accidental bump of `impl-serde` from `0.3.1` to `0.3.2` * Revert unneeded changes to `cargo.lock` * Update frame/transaction-payment/asset-tx-payment/src/payment.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Fixing cargo fmt Reverting changes which broke cargo fmt Co-authored-by: Alexander Popiak Co-authored-by: Alexander Popiak Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 26 + Cargo.toml | 1 + bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 6 +- bin/node/runtime/Cargo.toml | 1 + bin/node/runtime/src/impls.rs | 19 +- bin/node/runtime/src/lib.rs | 17 +- bin/node/test-runner-example/Cargo.toml | 1 + bin/node/test-runner-example/src/lib.rs | 2 +- bin/node/testing/Cargo.toml | 1 + bin/node/testing/src/keyring.rs | 2 +- .../asset-tx-payment/Cargo.toml | 55 ++ .../asset-tx-payment/README.md | 21 + .../asset-tx-payment/src/lib.rs | 288 +++++++ .../asset-tx-payment/src/payment.rs | 168 ++++ .../asset-tx-payment/src/tests.rs | 748 ++++++++++++++++++ 16 files changed, 1346 insertions(+), 11 deletions(-) create mode 100644 frame/transaction-payment/asset-tx-payment/Cargo.toml create mode 100644 frame/transaction-payment/asset-tx-payment/README.md create mode 100644 frame/transaction-payment/asset-tx-payment/src/lib.rs create mode 100644 frame/transaction-payment/asset-tx-payment/src/payment.rs create mode 100644 frame/transaction-payment/asset-tx-payment/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index f1e8b00c6eb0e..32234cc4c93a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4536,6 +4536,7 @@ dependencies = [ "node-primitives", "node-rpc", "node-runtime", + "pallet-asset-tx-payment", "pallet-balances", "pallet-im-online", "pallet-timestamp", @@ -4701,6 +4702,7 @@ dependencies = [ "hex-literal", "log 0.4.14", "node-primitives", + "pallet-asset-tx-payment", "pallet-assets", "pallet-authority-discovery", "pallet-authorship", @@ -4861,6 +4863,7 @@ dependencies = [ "node-executor", "node-primitives", "node-runtime", + "pallet-asset-tx-payment", "pallet-transaction-payment", "parity-scale-codec", "sc-block-builder", @@ -5104,6 +5107,28 @@ dependencies = [ 
"stable_deref_trait", ] +[[package]] +name = "pallet-asset-tx-payment" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "pallet-assets", + "pallet-authorship", + "pallet-balances", + "pallet-transaction-payment", + "parity-scale-codec", + "scale-info", + "serde", + "serde_json", + "smallvec 1.7.0", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-storage", +] + [[package]] name = "pallet-assets" version = "4.0.0-dev" @@ -10420,6 +10445,7 @@ dependencies = [ "node-cli", "node-primitives", "node-runtime", + "pallet-asset-tx-payment", "pallet-transaction-payment", "sc-consensus", "sc-consensus-babe", diff --git a/Cargo.toml b/Cargo.toml index e03f33a4d27d5..f30b223a9b205 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,6 +124,7 @@ members = [ "frame/system/rpc/runtime-api", "frame/timestamp", "frame/transaction-payment", + "frame/transaction-payment/asset-tx-payment", "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", "frame/transaction-storage", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 7529138c7f9d7..5a9e76bccf63b 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -81,6 +81,7 @@ sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../frame/system/rpc/runtime-api" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } +pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment/" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e73b69153d1df..fec91a9b67cc4 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -92,7 +92,7 @@ pub fn create_extrinsic( )), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), ); let raw_payload = node_runtime::SignedPayload::from_raw( @@ -725,7 +725,7 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let payment = pallet_transaction_payment::ChargeTransactionPayment::from(0); + let tx_payment = pallet_asset_tx_payment::ChargeAssetTxPayment::from(0, None); let extra = ( check_spec_version, check_tx_version, @@ -733,7 +733,7 @@ mod tests { check_era, check_nonce, check_weight, - payment, + tx_payment, ); let raw_payload = SignedPayload::from_raw( function, diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 9a481120fd01e..f9ce4b0fca900 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -95,6 +95,7 @@ pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../ pallet-utility = { version = "4.0.0-dev", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } 
+pallet-asset-tx-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/asset-tx-payment/" }
 pallet-transaction-storage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-storage" }
 pallet-uniques = { version = "4.0.0-dev", default-features = false, path = "../../../frame/uniques" }
 pallet-vesting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/vesting" }
diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs
index e315a45e698ce..cdd9f0900fd38 100644
--- a/bin/node/runtime/src/impls.rs
+++ b/bin/node/runtime/src/impls.rs
@@ -17,8 +17,12 @@
 //! Some configurable implementations as associated type for the substrate runtime.
 
-use crate::{Authorship, Balances, NegativeImbalance};
-use frame_support::traits::{Currency, OnUnbalanced};
+use crate::{AccountId, Assets, Authorship, Balances, NegativeImbalance, Runtime};
+use frame_support::traits::{
+	fungibles::{Balanced, CreditOf},
+	Currency, OnUnbalanced,
+};
+use pallet_asset_tx_payment::HandleCredit;
 
 pub struct Author;
 impl OnUnbalanced<NegativeImbalance> for Author {
@@ -27,6 +31,17 @@ impl OnUnbalanced<NegativeImbalance> for Author {
 	}
 }
 
+/// A `HandleCredit` implementation that naively transfers the fees to the block author.
+/// Will drop and burn the assets in case the transfer fails.
+pub struct CreditToBlockAuthor;
+impl HandleCredit<AccountId, Assets> for CreditToBlockAuthor {
+	fn handle_credit(credit: CreditOf<AccountId, Assets>) {
+		let author = pallet_authorship::Pallet::<Runtime>::author();
+		// Drop the result which will trigger the `OnDrop` of the imbalance in case of error.
+		let _ = Assets::resolve(&author, credit);
+	}
+}
+
 #[cfg(test)]
 mod multiplier_tests {
 	use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment};
diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 6d04ca8fdca87..5b3c0685d1a2a 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -87,7 +87,7 @@ pub use sp_runtime::BuildStorage;
 
 /// Implementations of some helper traits passed into runtime modules as associated types.
 pub mod impls;
-use impls::Author;
+use impls::{Author, CreditToBlockAuthor};
 
 /// Constant values used within the runtime.
 pub mod constants;
@@ -432,6 +432,14 @@ impl pallet_transaction_payment::Config for Runtime {
 		TargetedFeeAdjustment<Self, TargetBlockFullness, AdjustmentVariable, MinimumMultiplier>;
 }
 
+impl pallet_asset_tx_payment::Config for Runtime {
+	type Fungibles = Assets;
+	type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter<
+		pallet_assets::BalanceToAssetBalance<Balances, Runtime, ConvertInto>,
+		CreditToBlockAuthor,
+	>;
+}
+
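The `CreditToBlockAuthor` policy above is just one choice of `HandleCredit`. As a hedged sketch of an alternative (not part of this patch; the name `BurnAssetFees` is illustrative), a runtime could burn asset fees instead, relying on the fact that dropping a `CreditOf` triggers the imbalance's `OnDrop` and reduces total issuance:

```rust
use frame_support::traits::fungibles::{Balanced, CreditOf};
use pallet_asset_tx_payment::HandleCredit;

/// Hypothetical handler that burns all withdrawn asset fees.
pub struct BurnAssetFees;
impl<AccountId, B: Balanced<AccountId>> HandleCredit<AccountId, B> for BurnAssetFees {
	fn handle_credit(credit: CreditOf<AccountId, B>) {
		// Dropping the credit burns it: total issuance is decreased on drop.
		drop(credit);
	}
}
```

The blanket `impl HandleCredit for ()` added in `payment.rs` further below does exactly this, so `()` can be plugged in directly wherever burning is acceptable.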
 parameter_types! {
 	pub const MinimumPeriod: Moment = SLOT_DURATION / 2;
 }
@@ -969,7 +977,7 @@ where
 			frame_system::CheckEra::<Runtime>::from(era),
 			frame_system::CheckNonce::<Runtime>::from(nonce),
 			frame_system::CheckWeight::<Runtime>::new(),
-			pallet_transaction_payment::ChargeTransactionPayment::<Runtime>::from(tip),
+			pallet_asset_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, None),
 		);
 		let raw_payload = SignedPayload::new(call, extra)
 			.map_err(|e| {
@@ -1168,7 +1176,7 @@ parameter_types! {
 
 impl pallet_assets::Config for Runtime {
 	type Event = Event;
-	type Balance = u64;
+	type Balance = u128;
 	type AssetId = u32;
 	type Currency = Balances;
 	type ForceOrigin = EnsureRoot<AccountId>;
@@ -1257,6 +1265,7 @@ construct_runtime!(
 		Indices: pallet_indices,
 		Balances: pallet_balances,
 		TransactionPayment: pallet_transaction_payment,
+		AssetTxPayment: pallet_asset_tx_payment,
 		ElectionProviderMultiPhase: pallet_election_provider_multi_phase,
 		Staking: pallet_staking,
 		Session: pallet_session,
@@ -1315,7 +1324,7 @@ pub type SignedExtra = (
 	frame_system::CheckEra<Runtime>,
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+	pallet_asset_tx_payment::ChargeAssetTxPayment<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml
index b664cdb8e50e2..831a687254409 100644
--- a/bin/node/test-runner-example/Cargo.toml
+++ b/bin/node/test-runner-example/Cargo.toml
@@ -11,6 +11,7 @@ test-runner = { path = "../../../test-utils/test-runner" }
 frame-system = { path = "../../../frame/system" }
 frame-benchmarking = { path = "../../../frame/benchmarking" }
 pallet-transaction-payment = { path = "../../../frame/transaction-payment" }
+pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment/" }
 node-runtime = { path = "../runtime" }
 node-primitives = { path = "../primitives" }
diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs
index 0de7f5a4e2b70..68c14b73bf562 100644
--- a/bin/node/test-runner-example/src/lib.rs
+++ b/bin/node/test-runner-example/src/lib.rs
@@ -77,7 +77,7 @@ impl ChainInfo for NodeTemplateChainInfo {
 				frame_system::Pallet::<Self::Runtime>::account_nonce(from),
 			),
 			frame_system::CheckWeight::<Self::Runtime>::new(),
-			pallet_transaction_payment::ChargeTransactionPayment::<Self::Runtime>::from(0),
+			pallet_asset_tx_payment::ChargeAssetTxPayment::<Self::Runtime>::from(0, None),
 		)
 	}
 }
diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml
index 1854029b0709e..0e5ed07ac2952 100644
--- a/bin/node/testing/Cargo.toml
+++ b/bin/node/testing/Cargo.toml
@@ -38,6 +38,7 @@ sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" }
 frame-system = { version = "4.0.0-dev", path = "../../../frame/system" }
 substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" }
 pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" }
+pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment/" }
 sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" }
 sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" }
diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs
index 4e2d88b4bba33..1040e90c4d5d4 100644
--- a/bin/node/testing/src/keyring.rs
+++ b/bin/node/testing/src/keyring.rs
@@ -76,7 +76,7 @@ pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra {
 		frame_system::CheckEra::from(Era::mortal(256, 0)),
 		frame_system::CheckNonce::from(nonce),
 		frame_system::CheckWeight::new(),
-		pallet_transaction_payment::ChargeTransactionPayment::from(extra_fee),
+		pallet_asset_tx_payment::ChargeAssetTxPayment::from(extra_fee, None),
 	)
 }
 
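All the call sites above pass `None` as the asset id, so existing tooling keeps paying fees in the native token. Paying in an asset is a signing-side decision; a hedged sketch against the node runtime above (the asset id `1` and the exact `SignedExtra` composition are assumptions for illustration):

```rust
use node_primitives::{Balance, Index};
use node_runtime::{Runtime, SignedExtra};
use sp_runtime::generic::Era;

/// Illustrative only: like `signed_extra` above, but fees are charged in asset `1`,
/// which must already exist in `pallet_assets` with a sufficient balance for the signer.
pub fn signed_extra_paying_in_asset(nonce: Index, tip: Balance) -> SignedExtra {
	(
		frame_system::CheckSpecVersion::<Runtime>::new(),
		frame_system::CheckTxVersion::<Runtime>::new(),
		frame_system::CheckGenesis::<Runtime>::new(),
		frame_system::CheckEra::<Runtime>::from(Era::mortal(256, 0)),
		frame_system::CheckNonce::<Runtime>::from(nonce),
		frame_system::CheckWeight::<Runtime>::new(),
		pallet_asset_tx_payment::ChargeAssetTxPayment::<Runtime>::from(tip, Some(1)),
	)
}
```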
diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml
new file mode 100644
index 0000000000000..a381145d667a1
--- /dev/null
+++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml
@@ -0,0 +1,55 @@
+[package]
+name = "pallet-asset-tx-payment"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "Apache-2.0"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+description = "pallet to manage transaction payments in assets"
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+# Substrate dependencies
+sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" }
+sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" }
+sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" }
+sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" }
+
+frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" }
+frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" }
+pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = ".." }
+
+# Other dependencies
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0.126", optional = true }
+
+[dev-dependencies]
+smallvec = "1.7.0"
+serde_json = "1.0.68"
+
+sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" }
+
+pallet-assets = { version = "4.0.0-dev", path = "../../assets" }
+pallet-authorship = { version = "4.0.0-dev", path = "../../authorship" }
+pallet-balances = { version = "4.0.0-dev", path = "../../balances" }
+
+
+[features]
+default = ["std"]
+std = [
+	"serde",
+	"codec/std",
+	"sp-std/std",
+	"sp-runtime/std",
+	"frame-support/std",
+	"frame-system/std",
+	"sp-io/std",
+	"sp-core/std",
+	"pallet-transaction-payment/std",
+]
+try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/transaction-payment/asset-tx-payment/README.md b/frame/transaction-payment/asset-tx-payment/README.md
new file mode 100644
index 0000000000000..fc860347d85fa
--- /dev/null
+++ b/frame/transaction-payment/asset-tx-payment/README.md
@@ -0,0 +1,21 @@
+# pallet-asset-tx-payment
+
+## Asset Transaction Payment Pallet
+
+This pallet allows runtimes that include it to pay for transactions in assets other than the
+native token of the chain.
+
+### Overview
+It does this by extending transactions to include an optional `AssetId` that specifies the asset
+to be used for payment (defaulting to the native token on `None`). It expects an
+[`OnChargeAssetTransaction`] implementation analogously to [`pallet-transaction-payment`]. The
+included [`FungiblesAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the fee
+amount by converting the fee calculated by [`pallet-transaction-payment`] into the desired
+asset.
+
+### Integration
+This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means
+you should include both pallets in your `construct_runtime` macro, but only include this
+pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]).
+
+License: Apache-2.0
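To make the README's integration note concrete: a minimal runtime wiring, mirroring what the node runtime diff above does (the converter and credit handler are the ones this patch provides; `()` burns the withdrawn fees via the default `HandleCredit`):

```rust
impl pallet_asset_tx_payment::Config for Runtime {
	type Fungibles = Assets;
	type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter<
		pallet_assets::BalanceToAssetBalance<Balances, Runtime, sp_runtime::traits::ConvertInto>,
		(),
	>;
}

// Both payment pallets stay in `construct_runtime!`, but only this pallet's
// extension is part of `SignedExtra`:
pub type SignedExtra = (
	frame_system::CheckSpecVersion<Runtime>,
	frame_system::CheckTxVersion<Runtime>,
	frame_system::CheckGenesis<Runtime>,
	frame_system::CheckEra<Runtime>,
	frame_system::CheckNonce<Runtime>,
	frame_system::CheckWeight<Runtime>,
	pallet_asset_tx_payment::ChargeAssetTxPayment<Runtime>,
);
```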
diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs
new file mode 100644
index 0000000000000..1f22669857d76
--- /dev/null
+++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs
@@ -0,0 +1,288 @@
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Asset Transaction Payment Pallet
+//!
+//! This pallet allows runtimes that include it to pay for transactions in assets other than the
+//! main token of the chain.
+//!
+//! ## Overview
+//!
+//! It does this by extending transactions to include an optional `AssetId` that specifies the asset
+//! to be used for payment (defaulting to the native token on `None`). It expects an
+//! [`OnChargeAssetTransaction`] implementation analogously to [`pallet-transaction-payment`]. The
+//! included [`FungiblesAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the fee
+//! amount by converting the fee calculated by [`pallet-transaction-payment`] into the desired
+//! asset.
+//!
+//! ## Integration
+//!
+//! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means
+//! you should include both pallets in your `construct_runtime` macro, but only include this
+//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]).
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use sp_std::prelude::*;
+
+use codec::{Decode, Encode};
+use frame_support::{
+	dispatch::DispatchResult,
+	traits::{
+		tokens::{
+			fungibles::{Balanced, CreditOf, Inspect},
+			WithdrawConsequence,
+		},
+		IsType,
+	},
+	weights::{DispatchInfo, PostDispatchInfo},
+	DefaultNoBound,
+};
+use pallet_transaction_payment::OnChargeTransaction;
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero},
+	transaction_validity::{
+		InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction,
+	},
+	FixedPointOperand,
+};
+
+#[cfg(test)]
+mod tests;
+
+mod payment;
+pub use payment::*;
+
+// Type aliases used for interaction with `OnChargeTransaction`.
+pub(crate) type OnChargeTransactionOf<T> =
+	<T as pallet_transaction_payment::Config>::OnChargeTransaction;
+// Balance type alias.
+pub(crate) type BalanceOf<T> = <OnChargeTransactionOf<T> as OnChargeTransaction<T>>::Balance;
+// Liquidity info type alias.
+pub(crate) type LiquidityInfoOf<T> =
+	<OnChargeTransactionOf<T> as OnChargeTransaction<T>>::LiquidityInfo;
+
+// Type alias used for interaction with fungibles (assets).
+// Balance type alias.
+pub(crate) type AssetBalanceOf<T> =
+	<<T as Config>::Fungibles as Inspect<<T as frame_system::Config>::AccountId>>::Balance;
+/// Asset id type alias.
+pub(crate) type AssetIdOf<T> =
+	<<T as Config>::Fungibles as Inspect<<T as frame_system::Config>::AccountId>>::AssetId;
+
+// Type aliases used for interaction with `OnChargeAssetTransaction`.
+// Balance type alias.
+pub(crate) type ChargeAssetBalanceOf<T> =
+	<<T as Config>::OnChargeAssetTransaction as OnChargeAssetTransaction<T>>::Balance;
+// Asset id type alias.
+pub(crate) type ChargeAssetIdOf<T> =
+	<<T as Config>::OnChargeAssetTransaction as OnChargeAssetTransaction<T>>::AssetId;
+// Liquidity info type alias.
+pub(crate) type ChargeAssetLiquidityOf<T> =
+	<<T as Config>::OnChargeAssetTransaction as OnChargeAssetTransaction<T>>::LiquidityInfo;
+
+/// Used to pass the initial payment info from pre- to post-dispatch.
+#[derive(Encode, Decode, DefaultNoBound, TypeInfo)]
+pub enum InitialPayment<T: Config> {
+	/// No initial fee was paid.
+	Nothing,
+	/// The initial fee was paid in the native currency.
+	Native(LiquidityInfoOf<T>),
+	/// The initial fee was paid in an asset.
+	Asset(CreditOf<T::AccountId, T::Fungibles>),
+}
+
+pub use pallet::*;
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+
+	#[pallet::config]
+	pub trait Config: frame_system::Config + pallet_transaction_payment::Config {
+		/// The fungibles instance used to pay for transactions in assets.
+		type Fungibles: Balanced<Self::AccountId>;
+		/// The actual transaction charging logic that charges the fees.
+		type OnChargeAssetTransaction: OnChargeAssetTransaction<Self>;
+	}
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T>(_);
+}
+
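For orientation before the fee logic below: the amount that `withdraw_fee` asks `pallet-transaction-payment` to compute is, to a first approximation, `base_fee + len_fee + weight_fee + tip`; what is new in this pallet is only the conversion of that total into the chosen asset. A worked example using the mock constants from `tests.rs` later in this patch (base weight 5, one unit per encoded byte, one unit per weight):

```rust
// Illustrative arithmetic only: announced weight 100, encoded length 10, tip 5.
let (base_fee, len_fee, weight_fee, tip) = (5u64, 10u64, 100u64, 5u64);
let fee = base_fee + len_fee + weight_fee + tip;
assert_eq!(fee, 120); // what `transaction_payment_in_native_possible` charges account 2
```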
+/// Require the transactor pay for themselves and maybe include a tip to gain additional priority
+/// in the queue. Allows paying via both `Currency` as well as `fungibles::Balanced`.
+///
+/// Wraps the transaction logic in [`pallet_transaction_payment`] and extends it with assets.
+/// An asset id of `None` falls back to the underlying transaction payment via the native currency.
+#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)]
+#[scale_info(skip_type_params(T))]
+pub struct ChargeAssetTxPayment<T: Config> {
+	#[codec(compact)]
+	tip: BalanceOf<T>,
+	asset_id: Option<ChargeAssetIdOf<T>>,
+}
+
+impl<T: Config> ChargeAssetTxPayment<T>
+where
+	T::Call: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+	AssetBalanceOf<T>: Send + Sync + FixedPointOperand,
+	BalanceOf<T>: Send + Sync + FixedPointOperand + IsType<ChargeAssetBalanceOf<T>>,
+	ChargeAssetIdOf<T>: Send + Sync,
+	CreditOf<T::AccountId, T::Fungibles>: IsType<ChargeAssetLiquidityOf<T>>,
+{
+	/// Utility constructor. Used only in client/factory code.
+	pub fn from(tip: BalanceOf<T>, asset_id: Option<ChargeAssetIdOf<T>>) -> Self {
+		Self { tip, asset_id }
+	}
+
+	/// Fee withdrawal logic that dispatches to either `OnChargeAssetTransaction` or
+	/// `OnChargeTransaction`.
+	fn withdraw_fee(
+		&self,
+		who: &T::AccountId,
+		call: &T::Call,
+		info: &DispatchInfoOf<T::Call>,
+		len: usize,
+	) -> Result<(BalanceOf<T>, InitialPayment<T>), TransactionValidityError> {
+		let fee = pallet_transaction_payment::Pallet::<T>::compute_fee(len as u32, info, self.tip);
+		debug_assert!(self.tip <= fee, "tip should be included in the computed fee");
+		if fee.is_zero() {
+			Ok((fee, InitialPayment::Nothing))
+		} else if let Some(asset_id) = self.asset_id {
+			T::OnChargeAssetTransaction::withdraw_fee(
+				who,
+				call,
+				info,
+				asset_id,
+				fee.into(),
+				self.tip.into(),
+			)
+			.map(|i| (fee, InitialPayment::Asset(i.into())))
+		} else {
+			<OnChargeTransactionOf<T> as OnChargeTransaction<T>>::withdraw_fee(
+				who, call, info, fee, self.tip,
+			)
+			.map(|i| (fee, InitialPayment::Native(i)))
+			.map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() })
+		}
+	}
+}
+
+impl<T: Config> sp_std::fmt::Debug for ChargeAssetTxPayment<T> {
+	#[cfg(feature = "std")]
+	fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+		write!(f, "ChargeAssetTxPayment<{:?}, {:?}>", self.tip, self.asset_id.encode())
+	}
+	#[cfg(not(feature = "std"))]
+	fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result {
+		Ok(())
+	}
+}
+
+impl<T: Config> SignedExtension for ChargeAssetTxPayment<T>
+where
+	T::Call: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+	AssetBalanceOf<T>: Send + Sync + FixedPointOperand,
+	BalanceOf<T>: Send + Sync + From<u64> + FixedPointOperand + IsType<ChargeAssetBalanceOf<T>>,
+	ChargeAssetIdOf<T>: Send + Sync,
+	CreditOf<T::AccountId, T::Fungibles>: IsType<ChargeAssetLiquidityOf<T>>,
+{
+	const IDENTIFIER: &'static str = "ChargeAssetTxPayment";
+	type AccountId = T::AccountId;
+	type Call = T::Call;
+	type AdditionalSigned = ();
+	type Pre = (
+		// tip
+		BalanceOf<T>,
+		// who paid the fee
+		Self::AccountId,
+		// imbalance resulting from withdrawing the fee
+		InitialPayment<T>,
+	);
+
+	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> {
+		Ok(())
+	}
+
+	fn validate(
+		&self,
+		who: &Self::AccountId,
+		call: &Self::Call,
+		info: &DispatchInfoOf<Self::Call>,
+		len: usize,
+	) -> TransactionValidity {
+		use pallet_transaction_payment::ChargeTransactionPayment;
+		let (fee, _) = self.withdraw_fee(who, call, info, len)?;
+		let priority = ChargeTransactionPayment::<T>::get_priority(info, len, self.tip, fee);
+		Ok(ValidTransaction { priority, ..Default::default() })
+	}
+
+	fn pre_dispatch(
+		self,
+		who: &Self::AccountId,
+		call: &Self::Call,
+		info: &DispatchInfoOf<Self::Call>,
+		len: usize,
+	) -> Result<Self::Pre, TransactionValidityError> {
+		let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?;
+		Ok((self.tip, who.clone(), initial_payment))
+	}
+
+	fn post_dispatch(
+		pre: Self::Pre,
+		info: &DispatchInfoOf<Self::Call>,
+		post_info: &PostDispatchInfoOf<Self::Call>,
+		len: usize,
+		result: &DispatchResult,
+	) -> Result<(), TransactionValidityError> {
+		let (tip, who, initial_payment) = pre;
+		match initial_payment {
+			InitialPayment::Native(already_withdrawn) => {
+				pallet_transaction_payment::ChargeTransactionPayment::<T>::post_dispatch(
+					(tip, who, already_withdrawn),
+					info,
+					post_info,
+					len,
+					result,
+				)?;
+			},
+			InitialPayment::Asset(already_withdrawn) => {
+				let actual_fee = pallet_transaction_payment::Pallet::<T>::compute_actual_fee(
+					len as u32, info, post_info, tip,
+				);
+				T::OnChargeAssetTransaction::correct_and_deposit_fee(
+					&who,
+					info,
+					post_info,
+					actual_fee.into(),
+					tip.into(),
+					already_withdrawn.into(),
+				)?;
+			},
+			InitialPayment::Nothing => {
+				// `actual_fee` should be zero here for any signed extrinsic. It would be non-zero
+				// here in case of unsigned extrinsics as they don't pay fees but
+				// `compute_actual_fee` is not aware of them. In both cases it's fine to just move
+				// ahead without adjusting the fee, though, so we do nothing.
+				debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero.");
+			},
+		}
+
+		Ok(())
+	}
+}
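End to end, the extension above works in two steps: `pre_dispatch` withdraws the fee predicted from the announced weight and threads the result through `Pre` as an `InitialPayment`, and `post_dispatch` settles up once the actual weight is known. In `transaction_payment_in_native_possible` further below, account 2 is first charged 5 (base) + 10 (length) + 100 (announced weight) + 5 (tip) = 120 units, and then refunded 50 of those when the call reports an actual weight of only 50, leaving 70 paid. The asset path goes through `correct_and_deposit_fee` in `payment.rs`, which follows.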
diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs
new file mode 100644
index 0000000000000..09482f96490c7
--- /dev/null
+++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs
@@ -0,0 +1,168 @@
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits and default implementation for paying transaction fees in assets.
+use super::*;
+use crate::Config;
+
+use codec::FullCodec;
+use frame_support::{
+	traits::{
+		fungibles::{Balanced, CreditOf, Inspect},
+		tokens::BalanceConversion,
+	},
+	unsigned::TransactionValidityError,
+};
+use scale_info::TypeInfo;
+use sp_runtime::{
+	traits::{
+		AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, One, PostDispatchInfoOf,
+	},
+	transaction_validity::InvalidTransaction,
+};
+use sp_std::{fmt::Debug, marker::PhantomData};
+
+/// Handle withdrawing, refunding and depositing of transaction fees.
+pub trait OnChargeAssetTransaction<T: Config> {
+	/// The underlying integer type in which fees are calculated.
+	type Balance: AtLeast32BitUnsigned
+		+ FullCodec
+		+ Copy
+		+ MaybeSerializeDeserialize
+		+ Debug
+		+ Default
+		+ TypeInfo;
+	/// The type used to identify the assets used for transaction payment.
+	type AssetId: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo;
+	/// The type used to store the intermediate values between pre- and post-dispatch.
+	type LiquidityInfo;
+
+	/// Before the transaction is executed the payment of the transaction fees needs to be secured.
+	///
+	/// Note: The `fee` already includes the `tip`.
+	fn withdraw_fee(
+		who: &T::AccountId,
+		call: &T::Call,
+		dispatch_info: &DispatchInfoOf<T::Call>,
+		asset_id: Self::AssetId,
+		fee: Self::Balance,
+		tip: Self::Balance,
+	) -> Result<Self::LiquidityInfo, TransactionValidityError>;
+
+	/// After the transaction was executed the actual fee can be calculated.
+	/// This function should refund any overpaid fees and optionally deposit
+	/// the corrected amount.
+	///
+	/// Note: The `fee` already includes the `tip`.
+	fn correct_and_deposit_fee(
+		who: &T::AccountId,
+		dispatch_info: &DispatchInfoOf<T::Call>,
+		post_info: &PostDispatchInfoOf<T::Call>,
+		corrected_fee: Self::Balance,
+		tip: Self::Balance,
+		already_withdrawn: Self::LiquidityInfo,
+	) -> Result<(), TransactionValidityError>;
+}
+
+/// Allows specifying what to do with the withdrawn asset fees.
+pub trait HandleCredit<AccountId, B: Balanced<AccountId>> {
+	/// Implement to determine what to do with the withdrawn asset fees.
+	/// Default for `CreditOf` from the assets pallet is to burn and
+	/// decrease total issuance.
+	fn handle_credit(credit: CreditOf<AccountId, B>);
+}
+
+/// Default implementation that just drops the credit according to the `OnDrop` in the underlying
+/// imbalance type.
+impl<AccountId, B: Balanced<AccountId>> HandleCredit<AccountId, B> for () {
+	fn handle_credit(_credit: CreditOf<AccountId, B>) {}
+}
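`FungiblesAdapter` below is generic over the converter `CON`; the node runtime above plugs in `pallet_assets::BalanceToAssetBalance`, which derives the rate from asset metadata, but any `BalanceConversion` works. A minimal hedged sketch (assuming a fixed 1:1 rate and a shared `Balance` type; `OneToOne` is an illustrative name, not part of this patch):

```rust
use frame_support::traits::tokens::BalanceConversion;
use sp_std::marker::PhantomData;

/// Hypothetical converter that charges one asset unit per native unit.
pub struct OneToOne<Balance, AssetId>(PhantomData<(Balance, AssetId)>);
impl<Balance, AssetId> BalanceConversion<Balance, AssetId, Balance>
	for OneToOne<Balance, AssetId>
{
	type Error = ();
	fn to_asset_balance(balance: Balance, _asset_id: AssetId) -> Result<Balance, ()> {
		// 1 native unit == 1 asset unit; a real converter would consult a price source.
		Ok(balance)
	}
}
```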
+/// Implements the asset transaction for a balance to asset converter (implementing
+/// [`BalanceConversion`]) and a credit handler (implementing [`HandleCredit`]).
+///
+/// The credit handler is given the complete fee in terms of the asset used for the transaction.
+pub struct FungiblesAdapter<CON, HC>(PhantomData<(CON, HC)>);
+
+/// Default implementation for a runtime instantiating this pallet, a balance to asset converter and
+/// a credit handler.
+impl<T, CON, HC> OnChargeAssetTransaction<T> for FungiblesAdapter<CON, HC>
+where
+	T: Config,
+	CON: BalanceConversion<BalanceOf<T>, AssetIdOf<T>, AssetBalanceOf<T>>,
+	HC: HandleCredit<T::AccountId, T::Fungibles>,
+	AssetIdOf<T>: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo,
+{
+	type Balance = BalanceOf<T>;
+	type AssetId = AssetIdOf<T>;
+	type LiquidityInfo = CreditOf<T::AccountId, T::Fungibles>;
+
+	/// Withdraw the predicted fee from the transaction origin.
+	///
+	/// Note: The `fee` already includes the `tip`.
+	fn withdraw_fee(
+		who: &T::AccountId,
+		_call: &T::Call,
+		_info: &DispatchInfoOf<T::Call>,
+		asset_id: Self::AssetId,
+		fee: Self::Balance,
+		_tip: Self::Balance,
+	) -> Result<Self::LiquidityInfo, TransactionValidityError> {
+		// We don't know the precision of the underlying asset. Because the converted fee could be
+		// less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum
+		// fee.
+		let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() };
+		let converted_fee = CON::to_asset_balance(fee, asset_id)
+			.map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))?
+			.max(min_converted_fee);
+		let can_withdraw = <T::Fungibles as Inspect<T::AccountId>>::can_withdraw(
+			asset_id.into(),
+			who,
+			converted_fee,
+		);
+		if !matches!(can_withdraw, WithdrawConsequence::Success) {
+			return Err(InvalidTransaction::Payment.into())
+		}
+		<T::Fungibles as Balanced<T::AccountId>>::withdraw(asset_id.into(), who, converted_fee)
+			.map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))
+	}
+
+	/// Hand the fee and the tip over to the [`HandleCredit`] implementation.
+	/// Since the predicted fee might have been too high, parts of the fee may be refunded.
+	///
+	/// Note: The `corrected_fee` already includes the `tip`.
+	fn correct_and_deposit_fee(
+		who: &T::AccountId,
+		_dispatch_info: &DispatchInfoOf<T::Call>,
+		_post_info: &PostDispatchInfoOf<T::Call>,
+		corrected_fee: Self::Balance,
+		_tip: Self::Balance,
+		paid: Self::LiquidityInfo,
+	) -> Result<(), TransactionValidityError> {
+		let min_converted_fee = if corrected_fee.is_zero() { Zero::zero() } else { One::one() };
+		// Convert the corrected fee into the asset used for payment.
+		let converted_fee = CON::to_asset_balance(corrected_fee, paid.asset().into())
+			.map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() })?
+			.max(min_converted_fee);
+		// Calculate how much refund we should return.
+		let (final_fee, refund) = paid.split(converted_fee);
+		// Refund to the account that paid the fees. If this fails, the account might have dropped
+		// below the existential balance. In that case we don't refund anything.
+		let _ = <T::Fungibles as Balanced<T::AccountId>>::resolve(who, refund);
+		// Handle the final fee, e.g. by transferring to the block author or burning.
+		HC::handle_credit(final_fee);
+		Ok(())
+	}
+}
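A worked example of the `min_converted_fee` clamp above: with a converter that charges one asset unit per ten native units, a native fee of 10 converts to exactly 1 asset unit, but a fee of 9 would integer-divide to 0 and let the transaction through for free; the clamp therefore rounds any non-zero native fee up to at least one asset unit. A genuinely zero fee (e.g. a `Pays::No` call) stays zero, so nothing is withdrawn — the `info_from_pays`/`post_info_from_pays` helpers in the tests that follow exist for exactly that case.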
diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs
new file mode 100644
index 0000000000000..bd5dc57239a28
--- /dev/null
+++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs
@@ -0,0 +1,748 @@
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use super::*;
+use crate as pallet_asset_tx_payment;
+
+use frame_support::{
+	assert_ok,
+	pallet_prelude::*,
+	parameter_types,
+	traits::{fungibles::Mutate, FindAuthor},
+	weights::{
+		DispatchClass, DispatchInfo, PostDispatchInfo, Weight, WeightToFeeCoefficient,
+		WeightToFeeCoefficients, WeightToFeePolynomial,
+	},
+	ConsensusEngineId,
+};
+use frame_system as system;
+use frame_system::EnsureRoot;
+use pallet_balances::Call as BalancesCall;
+use pallet_transaction_payment::CurrencyAdapter;
+use smallvec::smallvec;
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, ConvertInto, IdentityLookup, StaticLookup},
+	Perbill,
+};
+use std::cell::RefCell;
+
+type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
+type Block = frame_system::mocking::MockBlock<Runtime>;
+type Balance = u64;
+type AccountId = u64;
+
+frame_support::construct_runtime!(
+	pub enum Runtime where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
+		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
+		Assets: pallet_assets::{Pallet, Call, Storage, Event<T>},
+		Authorship: pallet_authorship::{Pallet, Call, Storage},
+		AssetTxPayment: pallet_asset_tx_payment::{Pallet},
+	}
+);
+
+const CALL: &<Runtime as frame_system::Config>::Call =
+	&Call::Balances(BalancesCall::transfer { dest: 2, value: 69 });
+
+thread_local! {
+	static EXTRINSIC_BASE_WEIGHT: RefCell<u64> = RefCell::new(0);
+}
+
+pub struct BlockWeights;
+impl Get<frame_system::limits::BlockWeights> for BlockWeights {
+	fn get() -> frame_system::limits::BlockWeights {
+		frame_system::limits::BlockWeights::builder()
+			.base_block(0)
+			.for_class(DispatchClass::all(), |weights| {
+				weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into();
+			})
+			.for_class(DispatchClass::non_mandatory(), |weights| {
+				weights.max_total = 1024.into();
+			})
+			.build_or_panic()
+	}
+}
+
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub static TransactionByteFee: u64 = 1;
+	pub static WeightToFee: u64 = 1;
+}
+
+impl frame_system::Config for Runtime {
+	type BaseCallFilter = frame_support::traits::Everything;
+	type BlockWeights = BlockWeights;
+	type BlockLength = ();
+	type DbWeight = ();
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Call = Call;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = AccountId;
+	type Lookup = IdentityLookup<AccountId>;
+	type Header = Header;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<Balance>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type SystemWeightInfo = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+
+parameter_types! {
+parameter_types! {
+    pub const BlockHashCount: u64 = 250;
+    pub static TransactionByteFee: u64 = 1;
+    pub static WeightToFee: u64 = 1;
+}
+
+impl frame_system::Config for Runtime {
+    type BaseCallFilter = frame_support::traits::Everything;
+    type BlockWeights = BlockWeights;
+    type BlockLength = ();
+    type DbWeight = ();
+    type Origin = Origin;
+    type Index = u64;
+    type BlockNumber = u64;
+    type Call = Call;
+    type Hash = H256;
+    type Hashing = BlakeTwo256;
+    type AccountId = AccountId;
+    type Lookup = IdentityLookup<AccountId>;
+    type Header = Header;
+    type Event = Event;
+    type BlockHashCount = BlockHashCount;
+    type Version = ();
+    type PalletInfo = PalletInfo;
+    type AccountData = pallet_balances::AccountData<Balance>;
+    type OnNewAccount = ();
+    type OnKilledAccount = ();
+    type SystemWeightInfo = ();
+    type SS58Prefix = ();
+    type OnSetCode = ();
+}
+
+parameter_types! {
+    pub const ExistentialDeposit: u64 = 10;
+    pub const MaxReserves: u32 = 50;
+}
+
+impl pallet_balances::Config for Runtime {
+    type Balance = Balance;
+    type Event = Event;
+    type DustRemoval = ();
+    type ExistentialDeposit = ExistentialDeposit;
+    type AccountStore = System;
+    type MaxLocks = ();
+    type WeightInfo = ();
+    type MaxReserves = MaxReserves;
+    type ReserveIdentifier = [u8; 8];
+}
+
+impl WeightToFeePolynomial for WeightToFee {
+    type Balance = u64;
+
+    fn polynomial() -> WeightToFeeCoefficients<u64> {
+        smallvec![WeightToFeeCoefficient {
+            degree: 1,
+            coeff_frac: Perbill::zero(),
+            coeff_integer: WEIGHT_TO_FEE.with(|v| *v.borrow()),
+            negative: false,
+        }]
+    }
+}
+
+parameter_types! {
+    pub const OperationalFeeMultiplier: u8 = 5;
+}
+
+impl pallet_transaction_payment::Config for Runtime {
+    type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
+    type TransactionByteFee = TransactionByteFee;
+    type WeightToFee = WeightToFee;
+    type FeeMultiplierUpdate = ();
+    type OperationalFeeMultiplier = OperationalFeeMultiplier;
+}
+
+parameter_types! {
+    pub const AssetDeposit: u64 = 2;
+    pub const MetadataDeposit: u64 = 0;
+    pub const StringLimit: u32 = 20;
+}
+
+impl pallet_assets::Config for Runtime {
+    type Event = Event;
+    type Balance = Balance;
+    type AssetId = u32;
+    type Currency = Balances;
+    type ForceOrigin = EnsureRoot<AccountId>;
+    type AssetDeposit = AssetDeposit;
+    type MetadataDepositBase = MetadataDeposit;
+    type MetadataDepositPerByte = MetadataDeposit;
+    type ApprovalDeposit = MetadataDeposit;
+    type StringLimit = StringLimit;
+    type Freezer = ();
+    type Extra = ();
+    type WeightInfo = ();
+}
+
+pub struct HardcodedAuthor;
+const BLOCK_AUTHOR: AccountId = 1234;
+impl FindAuthor<AccountId> for HardcodedAuthor {
+    fn find_author<'a, I>(_: I) -> Option<AccountId>
+    where
+        I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
+    {
+        Some(BLOCK_AUTHOR)
+    }
+}
+
+impl pallet_authorship::Config for Runtime {
+    type FindAuthor = HardcodedAuthor;
+    type UncleGenerations = ();
+    type FilterUncle = ();
+    type EventHandler = ();
+}
+
+pub struct CreditToBlockAuthor;
+impl HandleCredit<AccountId, Assets> for CreditToBlockAuthor {
+    fn handle_credit(credit: CreditOf<AccountId, Assets>) {
+        let author = pallet_authorship::Pallet::<Runtime>::author();
+        // What to do in case paying the author fails (e.g. because `fee < min_balance`)?
+        // Default: drop the result, which will trigger the `OnDrop` of the imbalance.
+        let _ = <Assets as Balanced<AccountId>>::resolve(&author, credit);
+    }
+}
+
+impl Config for Runtime {
+    type Fungibles = Assets;
+    type OnChargeAssetTransaction = FungiblesAdapter<
+        pallet_assets::BalanceToAssetBalance<Balances, Runtime, ConvertInto>,
+        CreditToBlockAuthor,
+    >;
+}
+
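+// `BalanceToAssetBalance<Balances, Runtime, ConvertInto>` scales a native fee by
+// the ratio of the asset's minimum balance to the native existential deposit.
+// Worked example with the constants used in these tests (`ExistentialDeposit` = 10,
+// asset `min_balance` = 2, `base_weight` = `weight` = 5, `len` = 10):
+//
+//   native_fee = base_weight + weight + len = 5 + 5 + 10 = 20
+//   asset_fee  = native_fee * min_balance / existential_deposit = 20 * 2 / 10 = 4
+//
+// which is exactly the `fee` expression the tests below assert against.
+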
+pub struct ExtBuilder {
+    balance_factor: u64,
+    base_weight: u64,
+    byte_fee: u64,
+    weight_to_fee: u64,
+}
+
+impl Default for ExtBuilder {
+    fn default() -> Self {
+        Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 }
+    }
+}
+
+impl ExtBuilder {
+    pub fn base_weight(mut self, base_weight: u64) -> Self {
+        self.base_weight = base_weight;
+        self
+    }
+    pub fn balance_factor(mut self, factor: u64) -> Self {
+        self.balance_factor = factor;
+        self
+    }
+    fn set_constants(&self) {
+        EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight);
+        TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee);
+        WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee);
+    }
+    pub fn build(self) -> sp_io::TestExternalities {
+        self.set_constants();
+        let mut t = frame_system::GenesisConfig::default().build_storage::<Runtime>().unwrap();
+        pallet_balances::GenesisConfig::<Runtime> {
+            balances: if self.balance_factor > 0 {
+                vec![
+                    (1, 10 * self.balance_factor),
+                    (2, 20 * self.balance_factor),
+                    (3, 30 * self.balance_factor),
+                    (4, 40 * self.balance_factor),
+                    (5, 50 * self.balance_factor),
+                    (6, 60 * self.balance_factor),
+                ]
+            } else {
+                vec![]
+            },
+        }
+        .assimilate_storage(&mut t)
+        .unwrap();
+        t.into()
+    }
+}
+
+/// Create a transaction info struct from a weight. Handy to avoid building the whole struct.
+pub fn info_from_weight(w: Weight) -> DispatchInfo {
+    // pays_fee: Pays::Yes -- class: DispatchClass::Normal
+    DispatchInfo { weight: w, ..Default::default() }
+}
+
+fn post_info_from_weight(w: Weight) -> PostDispatchInfo {
+    PostDispatchInfo { actual_weight: Some(w), pays_fee: Default::default() }
+}
+
+fn info_from_pays(p: Pays) -> DispatchInfo {
+    DispatchInfo { pays_fee: p, ..Default::default() }
+}
+
+fn post_info_from_pays(p: Pays) -> PostDispatchInfo {
+    PostDispatchInfo { actual_weight: None, pays_fee: p }
+}
+
+fn default_post_info() -> PostDispatchInfo {
+    PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }
+}
+
+#[test]
+fn transaction_payment_in_native_possible() {
+    let balance_factor = 100;
+    ExtBuilder::default()
+        .balance_factor(balance_factor)
+        .base_weight(5)
+        .build()
+        .execute_with(|| {
+            let len = 10;
+            let pre = ChargeAssetTxPayment::<Runtime>::from(0, None)
+                .pre_dispatch(&1, CALL, &info_from_weight(5), len)
+                .unwrap();
+            let initial_balance = 10 * balance_factor;
+            assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10);
+
+            assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
+                pre,
+                &info_from_weight(5),
+                &default_post_info(),
+                len,
+                &Ok(())
+            ));
+            assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10);
+
+            let pre = ChargeAssetTxPayment::<Runtime>::from(5 /* tipped */, None)
+                .pre_dispatch(&2, CALL, &info_from_weight(100), len)
+                .unwrap();
+            let initial_balance_for_2 = 20 * balance_factor;
+            assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5);
+
+            assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
+                pre,
+                &info_from_weight(100),
+                &post_info_from_weight(50),
+                len,
+                &Ok(())
+            ));
+            assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5);
+        });
+}
+
+#[test]
+fn transaction_payment_in_asset_possible() {
+    let base_weight = 5;
+    let balance_factor = 100;
+    ExtBuilder::default()
+        .balance_factor(balance_factor)
+        .base_weight(base_weight)
.build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 1; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 5; + let len = 10; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .unwrap(); + // assert that native balance is not used + assert_eq!(Balances::free_balance(caller), 10 * balance_factor); + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee); + assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance - fee); + // check that the block author gets rewarded + assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), fee); + }); +} + +#[test] +fn transaction_payment_without_fee() { + let base_weight = 5; + let balance_factor = 100; + ExtBuilder::default() + .balance_factor(balance_factor) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 1; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 5; + let len = 10; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .unwrap(); + // assert that native balance is not used + assert_eq!(Balances::free_balance(caller), 10 * balance_factor); + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee); + assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &post_info_from_pays(Pays::No), + len, + &Ok(()) + )); + // caller should be refunded + assert_eq!(Assets::balance(asset_id, caller), balance); + // check that the block author did not get rewarded + assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0); + }); +} + +#[test] +fn asset_transaction_payment_with_tip_and_refund() { + let base_weight = 5; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + 
let caller = 2; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 1000; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 100; + let tip = 5; + let len = 10; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee_with_tip = + (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); + let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .unwrap(); + assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); + + let final_weight = 50; + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &post_info_from_weight(final_weight), + len, + &Ok(()) + )); + let final_fee = + fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get(); + assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee)); + assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee); + }); +} + +#[test] +fn payment_from_account_with_only_assets() { + let base_weight = 5; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 2; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + // assert that native balance is not necessary + assert_eq!(Balances::free_balance(caller), 0); + let weight = 5; + let len = 10; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .unwrap(); + assert_eq!(Balances::free_balance(caller), 0); + // check that fee was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - fee); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance - fee); + assert_eq!(Balances::free_balance(caller), 0); + }); +} + +#[test] +fn payment_only_with_existing_sufficient_asset() { + let base_weight = 5; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + let asset_id = 1; + let caller = 1; + let weight = 5; + let len = 10; + // pre_dispatch fails for non-existent asset + assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .is_err()); + + // create the non-sufficient asset + let min_balance = 2; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + false, /* is_sufficient */ + min_balance + )); + // pre_dispatch fails for non-sufficient asset + assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .is_err()); + }); +} + +#[test] +fn converted_fee_is_never_zero_if_input_fee_is_not() { + let 
base_weight = 1; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 1; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 1; + let len = 1; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); + // naive fee calculation would round down to zero + assert_eq!(fee, 0); + { + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + .unwrap(); + // `Pays::No` still implies no fees + assert_eq!(Assets::balance(asset_id, caller), balance); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_pays(Pays::No), + &post_info_from_pays(Pays::No), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + } + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .unwrap(); + // check that at least one coin was charged in the given asset + assert_eq!(Assets::balance(asset_id, caller), balance - 1); + + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance - 1); + }); +} + +#[test] +fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { + let base_weight = 1; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 100; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 1; + let len = 1; + // we convert the from weight to fee based on the ratio between asset min balance and + // existential deposit + let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); + // calculated fee is greater than 0 + assert!(fee > 0); + let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) + .pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len) + .unwrap(); + // `Pays::No` implies no pre-dispatch fees + assert_eq!(Assets::balance(asset_id, caller), balance); + let (_tip, _who, initial_payment) = ⪯ + let not_paying = match initial_payment { + &InitialPayment::Nothing => true, + _ => false, + }; + assert!(not_paying, "initial payment should be Nothing if we pass Pays::No"); + + // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the + // initial fee) + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_pays(Pays::No), + &post_info_from_pays(Pays::Yes), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + }); +} + +#[test] +fn 
post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { + let base_weight = 1; + ExtBuilder::default() + .balance_factor(100) + .base_weight(base_weight) + .build() + .execute_with(|| { + // create the asset + let asset_id = 1; + let min_balance = 100; + assert_ok!(Assets::force_create( + Origin::root(), + asset_id, + 42, /* owner */ + true, /* is_sufficient */ + min_balance + )); + + // mint into the caller account + let caller = 333; + let beneficiary = ::Lookup::unlookup(caller); + let balance = 100; + assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance)); + assert_eq!(Assets::balance(asset_id, caller), balance); + let weight = 1; + let len = 1; + let pre = ChargeAssetTxPayment::::pre_dispatch_unsigned( + CALL, + &info_from_weight(weight), + len, + ) + .unwrap(); + + assert_eq!(Assets::balance(asset_id, caller), balance); + let (_tip, _who, initial_payment) = ⪯ + let not_paying = match initial_payment { + &InitialPayment::Nothing => true, + _ => false, + }; + assert!(not_paying, "initial payment is Nothing for unsigned extrinsics"); + + // `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the + // initial fee) + assert_ok!(ChargeAssetTxPayment::::post_dispatch( + pre, + &info_from_weight(weight), + &post_info_from_pays(Pays::Yes), + len, + &Ok(()) + )); + assert_eq!(Assets::balance(asset_id, caller), balance); + }); +} From 72b9c36d66e351af9d90cbf8c37e2cf90dd59582 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 19 Nov 2021 10:37:37 +0100 Subject: [PATCH 118/162] Don't generate the consensusEngine field in chain specs (#10303) --- client/chain-spec/res/chain_spec.json | 1 - client/chain-spec/res/chain_spec2.json | 1 - client/chain-spec/src/chain_spec.rs | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/client/chain-spec/res/chain_spec.json b/client/chain-spec/res/chain_spec.json index 673f35d507919..c3365a9192f6e 100644 --- a/client/chain-spec/res/chain_spec.json +++ b/client/chain-spec/res/chain_spec.json @@ -19,7 +19,6 @@ ["wss://telemetry.polkadot.io/submit/", 0] ], "protocolId": "fir", - "consensusEngine": null, "genesis": { "raw": [ { diff --git a/client/chain-spec/res/chain_spec2.json b/client/chain-spec/res/chain_spec2.json index 950a7fc827494..00b9d603ae29e 100644 --- a/client/chain-spec/res/chain_spec2.json +++ b/client/chain-spec/res/chain_spec2.json @@ -19,7 +19,6 @@ ["wss://telemetry.polkadot.io/submit/", 0] ], "protocolId": "fir", - "consensusEngine": null, "myProperty": "Test Extension", "genesis": { "raw": [ diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 4aa0aa74630e0..8d8f62a5182cf 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -168,6 +168,7 @@ struct ClientSpec { #[serde(flatten)] extensions: E, // Never used, left only for backward compatibility. + #[serde(default, skip_serializing)] consensus_engine: (), #[serde(skip_serializing)] #[allow(unused)] From 0d0299099b18816fa3de1dbbe9f8f7b7c751994c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Nov 2021 12:33:28 +0100 Subject: [PATCH 119/162] Offchain worker: Enable http2 and improve logging (#10305) * Offchain worker: Enable http2 and improve logging Apparently some webpages now return http2 by default and that silently breaks the offchain http extension. The solution to this is to enable the `http2` feature of hyper. Besides that, this pr improves the logging to make it easier to debug such errors. 
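A minimal sketch of the kind of offchain-worker HTTP call that hits this code path, using the standard `sp_runtime::offchain::http` API (the endpoint and deadline are made-up placeholders):

let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(2_000));
let pending = http::Request::get("https://example.com")
    .deadline(deadline)
    .send()
    .map_err(|_| http::Error::IoError)?;
// Before this change a server that answers over HTTP/2 could make this fail
// with no useful trace; with hyper's `http2` feature enabled it succeeds, and
// the improved logging makes any remaining failure visible.
let response = pending.try_wait(deadline).map_err(|_| http::Error::DeadlineReached)??;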
* FMT * Adds http2 test --- Cargo.lock | 22 +++++- client/offchain/Cargo.toml | 4 +- client/offchain/src/api.rs | 50 ++++++++------ client/offchain/src/api/http.rs | 119 ++++++++++++++++++++++++++------ client/offchain/src/lib.rs | 32 ++++++--- 5 files changed, 172 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32234cc4c93a6..60f8217d69e8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2521,6 +2521,25 @@ dependencies = [ "web-sys", ] +[[package]] +name = "h2" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fd819562fcebdac5afc5c113c3ec36f902840b70fd4fc458799c8ce4607ae55" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "half" version = "1.7.1" @@ -2738,6 +2757,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", + "h2", "http", "http-body", "httparse", @@ -8349,7 +8369,6 @@ dependencies = [ "hyper 0.14.14", "hyper-rustls", "lazy_static", - "log 0.4.14", "num_cpus", "once_cell", "parity-scale-codec", @@ -8371,6 +8390,7 @@ dependencies = [ "substrate-test-runtime-client", "threadpool", "tokio", + "tracing", ] [[package]] diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 2a6fdddd7ad36..b92ee7041e5fd 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -19,7 +19,6 @@ hex = "0.4" fnv = "1.0.6" futures = "0.3.16" futures-timer = "3.0.2" -log = "0.4.8" num_cpus = "1.10" parking_lot = "0.11.1" rand = "0.7.2" @@ -31,9 +30,10 @@ sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sc-utils = { version = "4.0.0-dev", path = "../utils" } threadpool = "1.7" -hyper = { version = "0.14.14", features = ["stream"] } +hyper = { version = "0.14.14", features = ["stream", "http2"] } hyper-rustls = "0.22.1" once_cell = "1.8" +tracing = "0.1.29" [dev-dependencies] sc-client-db = { version = "0.10.0-dev", default-features = true, path = "../db" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 07136d1815b91..c2830510b015c 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -37,10 +37,11 @@ mod http; mod timestamp; fn unavailable_yet(name: &str) -> R { - log::error!( - target: "sc_offchain", + tracing::error!( + target: super::LOG_TARGET, "The {:?} API is not available for offchain workers yet. 
Follow \ - https://github.com/paritytech/substrate/issues/1458 for details", name + https://github.com/paritytech/substrate/issues/1458 for details", + name ); Default::default() } @@ -75,9 +76,12 @@ impl Db { impl offchain::DbExternalities for Db { fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - log::debug!( - target: "sc_offchain", - "{:?}: Write: {:?} <= {:?}", kind, hex::encode(key), hex::encode(value) + tracing::debug!( + target: "offchain-worker::storage", + ?kind, + key = ?hex::encode(key), + value = ?hex::encode(value), + "Write", ); match kind { StorageKind::PERSISTENT => self.persistent.set(STORAGE_PREFIX, key, value), @@ -86,9 +90,11 @@ impl offchain::DbExternalities for Db { } fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { - log::debug!( - target: "sc_offchain", - "{:?}: Clear: {:?}", kind, hex::encode(key) + tracing::debug!( + target: "offchain-worker::storage", + ?kind, + key = ?hex::encode(key), + "Clear", ); match kind { StorageKind::PERSISTENT => self.persistent.remove(STORAGE_PREFIX, key), @@ -103,13 +109,13 @@ impl offchain::DbExternalities for Db { old_value: Option<&[u8]>, new_value: &[u8], ) -> bool { - log::debug!( - target: "sc_offchain", - "{:?}: CAS: {:?} <= {:?} vs {:?}", - kind, - hex::encode(key), - hex::encode(new_value), - old_value.as_ref().map(hex::encode), + tracing::debug!( + target: "offchain-worker::storage", + ?kind, + key = ?hex::encode(key), + new_value = ?hex::encode(new_value), + old_value = ?old_value.as_ref().map(hex::encode), + "CAS", ); match kind { StorageKind::PERSISTENT => @@ -123,12 +129,12 @@ impl offchain::DbExternalities for Db { StorageKind::PERSISTENT => self.persistent.get(STORAGE_PREFIX, key), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), }; - log::debug!( - target: "sc_offchain", - "{:?}: Read: {:?} => {:?}", - kind, - hex::encode(key), - result.as_ref().map(hex::encode) + tracing::debug!( + target: "offchain-worker::storage", + ?kind, + key = ?hex::encode(key), + result = ?result.as_ref().map(hex::encode), + "Read", ); result } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index a2975bad16528..632c94b481074 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -33,7 +33,6 @@ use fnv::FnvHashMap; use futures::{channel::mpsc, future, prelude::*}; use hyper::{client, Body, Client as HyperClient}; use hyper_rustls::HttpsConnector; -use log::error; use once_cell::sync::Lazy; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; @@ -46,6 +45,8 @@ use std::{ task::{Context, Poll}, }; +const LOG_TARGET: &str = "offchain-worker::http"; + /// Wrapper struct used for keeping the hyper_rustls client running. 
#[derive(Clone)] pub struct SharedClient(Arc, Body>>>); @@ -146,13 +147,24 @@ impl HttpApi { match self.next_id.0.checked_add(1) { Some(new_id) => self.next_id.0 = new_id, None => { - error!("Overflow in offchain worker HTTP request ID assignment"); + tracing::error!( + target: LOG_TARGET, + "Overflow in offchain worker HTTP request ID assignment" + ); return Err(()) }, }; self.requests .insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); + tracing::error!( + target: LOG_TARGET, + id = %new_id.0, + %method, + %uri, + "Requested started", + ); + Ok(new_id) } @@ -168,11 +180,14 @@ impl HttpApi { _ => return Err(()), }; - let name = hyper::header::HeaderName::try_from(name).map_err(drop)?; - let value = hyper::header::HeaderValue::try_from(value).map_err(drop)?; + let header_name = hyper::header::HeaderName::try_from(name).map_err(drop)?; + let header_value = hyper::header::HeaderValue::try_from(value).map_err(drop)?; // Note that we're always appending headers and never replacing old values. // We assume here that the user knows what they're doing. - request.headers_mut().append(name, value); + request.headers_mut().append(header_name, header_value); + + tracing::debug!(target: LOG_TARGET, id = %request_id.0, %name, %value, "Added header to request"); + Ok(()) } @@ -207,7 +222,7 @@ impl HttpApi { sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), ) .map_err(|_| { - error!("HTTP sender refused data despite being ready"); + tracing::error!(target: "offchain-worker::http", "HTTP sender refused data despite being ready"); HttpError::IoError }) }; @@ -215,6 +230,7 @@ impl HttpApi { loop { request = match request { HttpApiRequest::NotDispatched(request, sender) => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Added new body chunk"); // If the request is not dispatched yet, dispatch it and loop again. let _ = self .to_worker @@ -225,14 +241,20 @@ impl HttpApi { HttpApiRequest::Dispatched(Some(mut sender)) => { if !chunk.is_empty() { match poll_sender(&mut sender) { - Err(HttpError::IoError) => return Err(HttpError::IoError), + Err(HttpError::IoError) => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); + return Err(HttpError::IoError) + }, other => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); return other }, } } else { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Finished writing body"); + // Writing an empty body is a hint that we should stop writing. Dropping // the sender. self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); @@ -250,14 +272,20 @@ impl HttpApi { .as_mut() .expect("Can only enter this match branch if Some; qed"), ) { - Err(HttpError::IoError) => return Err(HttpError::IoError), + Err(HttpError::IoError) => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Encountered io error while trying to add new chunk to body"); + return Err(HttpError::IoError) + }, other => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, res = ?other, "Added chunk to body"); self.requests .insert(request_id, HttpApiRequest::Response(response)); return other }, } } else { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Finished writing body"); + // Writing an empty body is a hint that we should stop writing. Dropping // the sender. 
self.requests.insert( @@ -271,13 +299,18 @@ impl HttpApi { } }, - HttpApiRequest::Fail(_) => - // If the request has already failed, return without putting back the request - // in the list. - return Err(HttpError::IoError), + HttpApiRequest::Fail(error) => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, ?error, "Request failed"); + + // If the request has already failed, return without putting back the request + // in the list. + return Err(HttpError::IoError) + }, v @ HttpApiRequest::Dispatched(None) | v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { + tracing::debug!(target: LOG_TARGET, id = %request_id.0, "Body sending already finished"); + // We have already finished sending this body. self.requests.insert(request_id, v); return Err(HttpError::Invalid) @@ -350,8 +383,19 @@ impl HttpApi { // Requests in "fail" mode are purged before returning. debug_assert_eq!(output.len(), ids.len()); for n in (0..ids.len()).rev() { - if let HttpRequestStatus::IoError = output[n] { - self.requests.remove(&ids[n]); + match output[n] { + HttpRequestStatus::IoError => { + self.requests.remove(&ids[n]); + }, + HttpRequestStatus::Invalid => { + tracing::debug!(target: LOG_TARGET, id = %ids[n].0, "Unknown request"); + }, + HttpRequestStatus::DeadlineReached => { + tracing::debug!(target: LOG_TARGET, id = %ids[n].0, "Deadline reached"); + }, + HttpRequestStatus::Finished(_) => { + tracing::debug!(target: LOG_TARGET, id = %ids[n].0, "Request finished"); + }, } } return output @@ -388,20 +432,23 @@ impl HttpApi { ); }, None => {}, // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), + _ => + tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"), } }, Some(WorkerToApi::Fail { id, error }) => match self.requests.remove(&id) { Some(HttpApiRequest::Dispatched(_)) => { + tracing::debug!(target: LOG_TARGET, id = %id.0, ?error, "Request failed"); self.requests.insert(id, HttpApiRequest::Fail(error)); }, None => {}, // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), + _ => + tracing::error!(target: "offchain-worker::http", "State mismatch between the API and worker"), }, None => { - error!("Worker has crashed"); + tracing::error!(target: "offchain-worker::http", "Worker has crashed"); return ids.iter().map(|_| HttpRequestStatus::IoError).collect() }, } @@ -474,7 +521,7 @@ impl HttpApi { }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. - error!("Failed to read from current read chunk: {:?}", err); + tracing::error!(target: "offchain-worker::http", "Failed to read from current read chunk: {:?}", err); return Err(HttpError::IoError) }, } @@ -719,7 +766,10 @@ mod tests { // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP // server that runs in the background as well. macro_rules! build_api_server { - () => {{ + () => { + build_api_server!(hyper::Response::new(hyper::Body::from("Hello World!"))) + }; + ( $response:expr ) => {{ let hyper_client = SHARED_CLIENT.clone(); let (api, worker) = http(hyper_client.clone()); @@ -736,9 +786,7 @@ mod tests { // otherwise the tests are flaky. 
let _ = req.into_body().collect::>().await; - Ok::<_, Infallible>(hyper::Response::new(hyper::Body::from( - "Hello World!", - ))) + Ok::<_, Infallible>($response) }, )) }), @@ -776,6 +824,33 @@ mod tests { assert_eq!(&buf[..n], b"Hello World!"); } + #[test] + fn basic_http2_localhost() { + let deadline = timestamp::now().add(Duration::from_millis(10_000)); + + // Performs an HTTP query to a background HTTP server. + + let (mut api, addr) = build_api_server!(hyper::Response::builder() + .version(hyper::Version::HTTP_2) + .body(hyper::Body::from("Hello World!")) + .unwrap()); + + let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); + api.request_write_body(id, &[], Some(deadline)).unwrap(); + + match api.response_wait(&[id], Some(deadline))[0] { + HttpRequestStatus::Finished(200) => {}, + v => panic!("Connecting to localhost failed: {:?}", v), + } + + let headers = api.response_headers(id); + assert!(headers.iter().any(|(h, _)| h.eq_ignore_ascii_case(b"Date"))); + + let mut buf = vec![0; 2048]; + let n = api.response_read_body(id, &mut buf, Some(deadline)).unwrap(); + assert_eq!(&buf[..n], b"Hello World!"); + } + #[test] fn request_start_invalid_call() { let (mut api, addr) = build_api_server!(); diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 2de24e10d927d..f9230a1552e1e 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -41,7 +41,6 @@ use futures::{ future::{ready, Future}, prelude::*, }; -use log::{debug, warn}; use parking_lot::Mutex; use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; use sp_api::{ApiExt, ProvideRuntimeApi}; @@ -57,6 +56,8 @@ mod api; pub use api::Db as OffchainDb; pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; +const LOG_TARGET: &str = "offchain-worker"; + /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the /// underlying Substrate networking. pub trait NetworkProvider: NetworkStateInfo { @@ -149,15 +150,25 @@ where err => { let help = "Consider turning off offchain workers if they are not part of your runtime."; - log::error!("Unsupported Offchain Worker API version: {:?}. {}.", err, help); + tracing::error!( + target: LOG_TARGET, + "Unsupported Offchain Worker API version: {:?}. 
{}.", + err, + help + ); 0 }, }; - debug!("Checking offchain workers at {:?}: version:{}", at, version); + tracing::debug!( + target: LOG_TARGET, + "Checking offchain workers at {:?}: version:{}", + at, + version + ); let process = (version > 0).then(|| { let (api, runner) = api::AsyncApi::new(network_provider, is_validator, self.shared_http_client.clone()); - debug!("Spawning offchain workers at {:?}", at); + tracing::debug!(target: LOG_TARGET, "Spawning offchain workers at {:?}", at); let header = header.clone(); let client = self.client.clone(); @@ -167,7 +178,7 @@ where self.spawn_worker(move || { let runtime = client.runtime_api(); let api = Box::new(api); - debug!("Running offchain workers at {:?}", at); + tracing::debug!(target: LOG_TARGET, "Running offchain workers at {:?}", at); let context = ExecutionContext::OffchainCall(Some((api, capabilities))); let run = if version == 2 { @@ -181,7 +192,12 @@ where ) }; if let Err(e) = run { - log::error!("Error running offchain workers at {:?}: {:?}", at, e); + tracing::error!( + target: LOG_TARGET, + "Error running offchain workers at {:?}: {:?}", + at, + e + ); } }); @@ -232,8 +248,8 @@ pub async fn notification_future( .boxed(), ); } else { - log::debug!( - target: "sc_offchain", + tracing::debug!( + target: LOG_TARGET, "Skipping offchain workers for non-canon block: {:?}", n.header, ) From 69e384bae9f6189089d2136edb560644964b8788 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 19 Nov 2021 12:36:09 +0100 Subject: [PATCH 120/162] Bump libsecp256k1 from 0.6.0 to 0.7.0 (#10214) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump libsecp256k1 from 0.6.0 to 0.7.0 Bumps [libsecp256k1](https://github.com/paritytech/libsecp256k1) from 0.6.0 to 0.7.0. - [Release notes](https://github.com/paritytech/libsecp256k1/releases) - [Changelog](https://github.com/paritytech/libsecp256k1/blob/master/CHANGELOG.md) - [Commits](https://github.com/paritytech/libsecp256k1/commits) --- updated-dependencies: - dependency-name: libsecp256k1 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Fix pallet_contracts for new libsecp256k1 version Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alexander Theißen --- Cargo.lock | 42 ++++++++++-------------- client/executor/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 8 ++--- frame/contracts/src/benchmarking/code.rs | 14 ++++---- primitives/core/Cargo.toml | 2 +- primitives/io/Cargo.toml | 2 +- 6 files changed, 31 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 60f8217d69e8a..e9798d4c42054 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3843,25 +3843,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "libsecp256k1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2 0.9.8", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.7.0" @@ -3871,12 +3852,14 @@ dependencies = [ "arrayref", "base64 0.13.0", "digest 0.9.0", + "hmac-drbg", "libsecp256k1-core 0.3.0", "libsecp256k1-gen-ecmult 0.3.0", "libsecp256k1-gen-genmult 0.3.0", "rand 0.8.4", "serde", "sha2 0.9.8", + "typenum", ] [[package]] @@ -5414,7 +5397,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "libsecp256k1 0.6.0", + "libsecp256k1 0.7.0", "log 0.4.14", "pallet-balances", "pallet-contracts-primitives", @@ -5425,8 +5408,8 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "pwasm-utils", - "rand 0.7.3", - "rand_pcg 0.2.1", + "rand 0.8.4", + "rand_pcg 0.3.1", "scale-info", "serde", "smallvec 1.7.0", @@ -7233,6 +7216,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_pcg" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" +dependencies = [ + "rand_core 0.6.2", +] + [[package]] name = "rand_xorshift" version = "0.1.1" @@ -8068,7 +8060,7 @@ version = "0.10.0-dev" dependencies = [ "hex-literal", "lazy_static", - "libsecp256k1 0.6.0", + "libsecp256k1 0.7.0", "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", @@ -9442,7 +9434,7 @@ dependencies = [ "hex-literal", "impl-serde", "lazy_static", - "libsecp256k1 0.6.0", + "libsecp256k1 0.7.0", "log 0.4.14", "merlin", "num-traits", @@ -9562,7 +9554,7 @@ version = "4.0.0-dev" dependencies = [ "futures 0.3.16", "hash-db", - "libsecp256k1 0.6.0", + "libsecp256k1 0.7.0", "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 4a8e678105437..ebdcc44ff0d8c 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -32,7 +32,7 @@ sc-executor-wasmi = { version = "0.10.0-dev", path = "wasmi" } sc-executor-wasmtime = { version = "0.10.0-dev", path = "wasmtime", optional = true } parking_lot = "0.11.1" log = "0.4.8" -libsecp256k1 = "0.6" +libsecp256k1 = "0.7" sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../../primitives/core/hashing/proc-macro" } [dev-dependencies] diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index dec33768a0426..6639d939e1796 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -28,9 +28,9 @@ smallvec = { version = "1", default-features = false, features = [ 
wasmi-validation = { version = "0.4", default-features = false } # Only used in benchmarking to generate random contract code -libsecp256k1 = { version = "0.6.0", optional = true, default-features = false, features = ["hmac", "static-context"] } -rand = { version = "0.7.3", optional = true, default-features = false } -rand_pcg = { version = "0.2", optional = true } +libsecp256k1 = { version = "0.7", optional = true, default-features = false, features = ["hmac", "static-context"] } +rand = { version = "0.8", optional = true, default-features = false } +rand_pcg = { version = "0.3", optional = true } # Substrate Dependencies frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } @@ -47,7 +47,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primit [dev-dependencies] assert_matches = "1" hex-literal = "0.3" -pretty_assertions = "1.0.0" +pretty_assertions = "1" wat = "1" # Substrate Dependencies diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 98f52f4719a61..5dac8a84ace8a 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -495,11 +495,11 @@ pub mod body { vec![Instruction::I32Const(current as i32)] }, DynInstr::RandomUnaligned(low, high) => { - let unaligned = rng.gen_range(*low, *high) | 1; + let unaligned = rng.gen_range(*low..*high) | 1; vec![Instruction::I32Const(unaligned as i32)] }, DynInstr::RandomI32(low, high) => { - vec![Instruction::I32Const(rng.gen_range(*low, *high))] + vec![Instruction::I32Const(rng.gen_range(*low..*high))] }, DynInstr::RandomI32Repeated(num) => (&mut rng) .sample_iter(Standard) @@ -512,19 +512,19 @@ pub mod body { .map(|val| Instruction::I64Const(val)) .collect(), DynInstr::RandomGetLocal(low, high) => { - vec![Instruction::GetLocal(rng.gen_range(*low, *high))] + vec![Instruction::GetLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomSetLocal(low, high) => { - vec![Instruction::SetLocal(rng.gen_range(*low, *high))] + vec![Instruction::SetLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomTeeLocal(low, high) => { - vec![Instruction::TeeLocal(rng.gen_range(*low, *high))] + vec![Instruction::TeeLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomGetGlobal(low, high) => { - vec![Instruction::GetGlobal(rng.gen_range(*low, *high))] + vec![Instruction::GetGlobal(rng.gen_range(*low..*high))] }, DynInstr::RandomSetGlobal(low, high) => { - vec![Instruction::SetGlobal(rng.gen_range(*low, *high))] + vec![Instruction::SetGlobal(rng.gen_range(*low..*high))] }, }) .chain(sp_std::iter::once(Instruction::End)) diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index dc8904f33e0c1..61451e10ecf2f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -65,7 +65,7 @@ schnorrkel = { version = "0.9.1", features = [ sha2 = { version = "0.9.8", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.6.1", default-features = false, optional = true } -libsecp256k1 = { version = "0.6", default-features = false, features = ["hmac", "static-context"], optional = true } +libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac", "static-context"], optional = true } sp-core-hashing = { version = "4.0.0-dev", path = "./hashing", default-features = false, optional = true } merlin = { version = "2.0", default-features = false, optional = true } 
ss58-registry = { version = "1.5.0", default-features = false } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 2d7c50bdc25e4..2e0982a6e5f13 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -20,7 +20,7 @@ hash-db = { version = "0.15.2", default-features = false } sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } sp-keystore = { version = "0.10.0-dev", default-features = false, optional = true, path = "../keystore" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } -libsecp256k1 = { version = "0.6", optional = true } +libsecp256k1 = { version = "0.7", optional = true } sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" } sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } From cdcf872c3791d148dc67b4df2dff168c9008ca76 Mon Sep 17 00:00:00 2001 From: Zeke Mostov Date: Fri, 19 Nov 2021 16:14:21 +0100 Subject: [PATCH 121/162] Add prometheus metrics for block authorship (#10316) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add prom metric to basic authorship * Add proposer_block_proposal_time * +nightly-2021-10-29 fmt * Use saturating_duration_since, not elasped * Update client/basic-authorship/src/basic_authorship.rs Co-authored-by: Bastian Köcher * Update client/basic-authorship/src/basic_authorship.rs Co-authored-by: Bastian Köcher * +nightly-2021-10-29 fmt Co-authored-by: Bastian Köcher --- .../basic-authorship/src/basic_authorship.rs | 23 ++++++++++++++++++- client/proposer-metrics/src/lib.rs | 16 +++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 0fd3932807a17..70633925c5ba6 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -346,10 +346,23 @@ where block_size_limit: Option, ) -> Result, PR::Proof>, sp_blockchain::Error> { + let propose_with_start = time::Instant::now(); let mut block_builder = self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?; - for inherent in block_builder.create_inherents(inherent_data)? 
{ + let create_inherents_start = time::Instant::now(); + let inherents = block_builder.create_inherents(inherent_data)?; + let create_inherents_end = time::Instant::now(); + + self.metrics.report(|metrics| { + metrics.create_inherents_time.observe( + create_inherents_end + .saturating_duration_since(create_inherents_start) + .as_secs_f64(), + ); + }); + + for inherent in inherents { match block_builder.push(inherent) { Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { warn!("⚠️ Dropping non-mandatory inherent from overweight block.") @@ -529,6 +542,14 @@ where let proof = PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; + + let propose_with_end = time::Instant::now(); + self.metrics.report(|metrics| { + metrics.create_block_proposal_time.observe( + propose_with_end.saturating_duration_since(propose_with_start).as_secs_f64(), + ); + }); + Ok(Proposal { block, proof, storage_changes }) } } diff --git a/client/proposer-metrics/src/lib.rs b/client/proposer-metrics/src/lib.rs index da29fb2951995..452a796b73920 100644 --- a/client/proposer-metrics/src/lib.rs +++ b/client/proposer-metrics/src/lib.rs @@ -47,6 +47,8 @@ impl MetricsLink { pub struct Metrics { pub block_constructed: Histogram, pub number_of_transactions: Gauge, + pub create_inherents_time: Histogram, + pub create_block_proposal_time: Histogram, } impl Metrics { @@ -66,6 +68,20 @@ impl Metrics { )?, registry, )?, + create_inherents_time: register( + Histogram::with_opts(HistogramOpts::new( + "proposer_create_inherents_time", + "Histogram of time taken to execute create inherents", + ))?, + registry, + )?, + create_block_proposal_time: register( + Histogram::with_opts(HistogramOpts::new( + "proposer_block_proposal_time", + "Histogram of time taken to construct a block and prepare it for proposal", + ))?, + registry, + )?, }) } } From 3cf5449fd2143d07ff2ee24eb3b3c40ecf1e57ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Nov 2021 21:30:37 +0100 Subject: [PATCH 122/162] Taskmanager: Remove `clean_shutdown` (#10314) There is no reason for this function, tokio already blocks automatically until all tasks are ended. Another reason to remove this feature is `mpsc_background_tasks` unbounded channel. Recently this channel was reporting too many unprocessed elements. We assume that this was a result of a lot of very shot lived tasks that somehow flooded this channel. --- client/cli/src/runner.rs | 3 +- client/service/src/task_manager/mod.rs | 83 +----- client/service/src/task_manager/tests.rs | 316 +++++++++-------------- test-utils/test-runner/src/node.rs | 2 - 4 files changed, 140 insertions(+), 264 deletions(-) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 6f03e02a12d05..640b87584d4b6 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -98,7 +98,7 @@ where pin_mut!(f); tokio_runtime.block_on(main(f))?; - tokio_runtime.block_on(task_manager.clean_shutdown()); + drop(task_manager); Ok(()) } @@ -154,7 +154,6 @@ impl Runner { self.print_node_infos(); let mut task_manager = self.tokio_runtime.block_on(initialize(self.config))?; let res = self.tokio_runtime.block_on(main(task_manager.future().fuse())); - self.tokio_runtime.block_on(task_manager.clean_shutdown()); Ok(res?) 
} diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 25d3ecd7d5c3c..342ea6627be68 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -21,17 +21,16 @@ use crate::{config::TaskType, Error}; use exit_future::Signal; use futures::{ - future::{join_all, pending, select, try_join_all, BoxFuture, Either}, + future::{pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, }; -use log::debug; use prometheus_endpoint::{ exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{panic, pin::Pin, result::Result}; -use tokio::{runtime::Handle, task::JoinHandle}; +use tokio::runtime::Handle; use tracing_futures::Instrument; mod prometheus_future; @@ -73,7 +72,6 @@ pub struct SpawnTaskHandle { on_exit: exit_future::Exit, tokio_handle: Handle, metrics: Option, - task_notifier: TracingUnboundedSender>, } impl SpawnTaskHandle { @@ -113,11 +111,6 @@ impl SpawnTaskHandle { task: impl Future + Send + 'static, task_type: TaskType, ) { - if self.task_notifier.is_closed() { - debug!("Attempt to spawn a new task has been prevented: {}", name); - return - } - let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); @@ -169,17 +162,17 @@ impl SpawnTaskHandle { } .in_current_span(); - let join_handle = match task_type { - TaskType::Async => self.tokio_handle.spawn(future), + match task_type { + TaskType::Async => { + self.tokio_handle.spawn(future); + }, TaskType::Blocking => { let handle = self.tokio_handle.clone(); self.tokio_handle.spawn_blocking(move || { handle.block_on(future); - }) + }); }, - }; - - let _ = self.task_notifier.unbounded_send(join_handle); + } } } @@ -288,8 +281,8 @@ pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. on_exit: exit_future::Exit, - /// A signal that makes the exit future above resolve, fired on service drop. - signal: Option, + /// A signal that makes the exit future above resolve, fired on drop. + _signal: Signal, /// Tokio runtime handle that is used to spawn futures. tokio_handle: Handle, /// Prometheus metric where to report the polling times. @@ -301,10 +294,6 @@ pub struct TaskManager { essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. keep_alive: Box, - /// A sender to a stream of background tasks. This is used for the completion future. - task_notifier: TracingUnboundedSender>, - /// This future will complete when all the tasks are joined and the stream is closed. - completion_future: JoinHandle<()>, /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. @@ -325,25 +314,14 @@ impl TaskManager { let metrics = prometheus_registry.map(Metrics::register).transpose()?; - let (task_notifier, background_tasks) = tracing_unbounded("mpsc_background_tasks"); - // NOTE: for_each_concurrent will await on all the JoinHandle futures at the same time. It - // is possible to limit this but it's actually better for the memory foot print to await - // them all to not accumulate anything on that stream. 
- let completion_future = - tokio_handle.spawn(background_tasks.for_each_concurrent(None, |x| async move { - let _ = x.await; - })); - Ok(Self { on_exit, - signal: Some(signal), + _signal: signal, tokio_handle, metrics, essential_failed_tx, essential_failed_rx, keep_alive: Box::new(()), - task_notifier, - completion_future, children: Vec::new(), }) } @@ -354,7 +332,6 @@ impl TaskManager { on_exit: self.on_exit.clone(), tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), - task_notifier: self.task_notifier.clone(), } } @@ -363,36 +340,12 @@ impl TaskManager { SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) } - /// Send the signal for termination, prevent new tasks to be created, await for all the existing - /// tasks to be finished and drop the object. You can consider this as an async drop. - /// - /// It's always better to call and await this function before exiting the process as background - /// tasks may be running in the background. If the process exit and the background tasks are not - /// cancelled, this will lead to objects not getting dropped properly. - /// - /// This is an issue in some cases as some of our dependencies do require that we drop all the - /// objects properly otherwise it triggers a SIGABRT on exit. - pub fn clean_shutdown(mut self) -> Pin + Send>> { - self.terminate(); - let children_shutdowns = self.children.into_iter().map(|x| x.clean_shutdown()); - let keep_alive = self.keep_alive; - let completion_future = self.completion_future; - - Box::pin(async move { - join_all(children_shutdowns).await; - let _ = completion_future.await; - - let _ = keep_alive; - }) - } - /// Return a future that will end with success if the signal to terminate was sent /// (`self.terminate()`) or with an error if an essential task fails. /// /// # Warning /// - /// This function will not wait until the end of the remaining task. You must call and await - /// `clean_shutdown()` after this. + /// This function will not wait until the end of the remaining task. pub fn future<'a>( &'a mut self, ) -> Pin> + Send + 'a>> { @@ -417,18 +370,6 @@ impl TaskManager { }) } - /// Signal to terminate all the running tasks. - pub fn terminate(&mut self) { - if let Some(signal) = self.signal.take() { - let _ = signal.fire(); - // NOTE: this will prevent new tasks to be spawned - self.task_notifier.close_channel(); - for child in self.children.iter_mut() { - child.terminate(); - } - } - } - /// Set what the task manager should keep alive, can be called multiple times. pub fn keep_alive(&mut self, to_keep_alive: T) { // allows this fn to safely called multiple times. 
diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index 75092ff2ae62e..f14023f34f6bd 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -90,225 +90,163 @@ fn new_task_manager(tokio_handle: tokio::runtime::Handle) -> TaskManager { #[test] fn ensure_tasks_are_awaited_on_shutdown() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 2); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 2); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let task_manager = new_task_manager(handle); + let spawn_handle = task_manager.spawn_handle(); + spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + } drop_tester.wait_on_drop(); } #[test] fn ensure_keep_alive_during_shutdown() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); - task_manager.keep_alive(drop_tester.new_ref()); - spawn_handle.spawn("task1", None, run_background_task(())); - assert_eq!(drop_tester, 1); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 1); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let mut task_manager = new_task_manager(handle); + let spawn_handle = task_manager.spawn_handle(); + task_manager.keep_alive(drop_tester.new_ref()); + spawn_handle.spawn("task1", None, run_background_task(())); + assert_eq!(drop_tester, 1); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 1); + } drop_tester.wait_on_drop(); } #[test] fn ensure_blocking_futures_are_awaited_on_shutdown() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); - let drop_tester = DropTester::new(); - spawn_handle.spawn( - "task1", - None, - run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), - ); - spawn_handle.spawn( - "task2", - None, - run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), - ); - assert_eq!(drop_tester, 2); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 2); - runtime.block_on(task_manager.clean_shutdown()); - assert_eq!(drop_tester, 0); -} - -#[test] -fn 
ensure_no_task_can_be_spawn_after_terminate() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); - let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 2); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 2); - task_manager.terminate(); - spawn_handle.spawn("task3", None, run_background_task(drop_tester.new_ref())); - runtime.block_on(task_manager.clean_shutdown()); - drop_tester.wait_on_drop(); -} - -#[test] -fn ensure_task_manager_future_ends_when_task_manager_terminated() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 2); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 2); - task_manager.terminate(); - runtime.block_on(task_manager.future()).expect("future has ended without error"); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let task_manager = new_task_manager(handle); + let spawn_handle = task_manager.spawn_handle(); + spawn_handle.spawn( + "task1", + None, + run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), + ); + spawn_handle.spawn( + "task2", + None, + run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), + ); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + } assert_eq!(drop_tester, 0); } #[test] fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle); - let spawn_handle = task_manager.spawn_handle(); - let spawn_essential_handle = task_manager.spawn_essential_handle(); let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 2); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 2); - spawn_essential_handle.spawn("task3", None, async { panic!("task failed") }); - runtime - .block_on(task_manager.future()) - .expect_err("future()'s Result must be Err"); - assert_eq!(drop_tester, 2); - runtime.block_on(task_manager.clean_shutdown()); - drop_tester.wait_on_drop(); -} - -#[test] -fn ensure_children_tasks_ends_when_task_manager_terminated() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle.clone()); - let 
child_1 = new_task_manager(handle.clone()); - let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = new_task_manager(handle.clone()); - let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_child(child_1); - task_manager.add_child(child_2); - let spawn_handle = task_manager.spawn_handle(); - let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 4); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 4); - task_manager.terminate(); - runtime.block_on(task_manager.future()).expect("future has ended without error"); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let mut task_manager = new_task_manager(handle); + let spawn_handle = task_manager.spawn_handle(); + let spawn_essential_handle = task_manager.spawn_essential_handle(); + spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + spawn_essential_handle.spawn("task3", None, async { panic!("task failed") }); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); + assert_eq!(drop_tester, 2); + } drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle.clone()); - let child_1 = new_task_manager(handle.clone()); - let spawn_handle_child_1 = child_1.spawn_handle(); - let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); - let child_2 = new_task_manager(handle.clone()); - let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_child(child_1); - task_manager.add_child(child_2); - let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 4); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 4); - spawn_essential_handle_child_1.spawn("task5", None, async { panic!("task failed") }); - runtime - .block_on(task_manager.future()) - .expect_err("future()'s Result must be Err"); - assert_eq!(drop_tester, 4); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let mut task_manager = new_task_manager(handle.clone()); + let child_1 = 
new_task_manager(handle.clone()); + let spawn_handle_child_1 = child_1.spawn_handle(); + let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); + let child_2 = new_task_manager(handle.clone()); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_child(child_1); + task_manager.add_child(child_2); + let spawn_handle = task_manager.spawn_handle(); + spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_essential_handle_child_1.spawn("task5", None, async { panic!("task failed") }); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); + assert_eq!(drop_tester, 4); + } drop_tester.wait_on_drop(); } #[test] fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { - let runtime = tokio::runtime::Runtime::new().unwrap(); - let handle = runtime.handle().clone(); - - let mut task_manager = new_task_manager(handle.clone()); - let child_1 = new_task_manager(handle.clone()); - let spawn_handle_child_1 = child_1.spawn_handle(); - let child_2 = new_task_manager(handle.clone()); - let spawn_handle_child_2 = child_2.spawn_handle(); - task_manager.add_child(child_1); - task_manager.add_child(child_2); - let spawn_handle = task_manager.spawn_handle(); let drop_tester = DropTester::new(); - spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); - spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref())); - spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref())); - assert_eq!(drop_tester, 4); - // allow the tasks to even start - runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); - assert_eq!(drop_tester, 4); - spawn_handle_child_1.spawn("task5", None, async { panic!("task failed") }); - runtime.block_on(async { - let t1 = task_manager.future().fuse(); - let t2 = tokio::time::sleep(Duration::from_secs(3)).fuse(); - - pin_mut!(t1, t2); - - select! 
{ - res = t1 => panic!("task should not have stopped: {:?}", res), - _ = t2 => {}, - } - }); - assert_eq!(drop_tester, 4); - runtime.block_on(task_manager.clean_shutdown()); + { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + + let mut task_manager = new_task_manager(handle.clone()); + let child_1 = new_task_manager(handle.clone()); + let spawn_handle_child_1 = child_1.spawn_handle(); + let child_2 = new_task_manager(handle.clone()); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_child(child_1); + task_manager.add_child(child_2); + let spawn_handle = task_manager.spawn_handle(); + spawn_handle.spawn("task1", None, run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", None, run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", None, run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", None, run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::sleep(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_handle_child_1.spawn("task5", None, async { panic!("task failed") }); + runtime.block_on(async { + let t1 = task_manager.future().fuse(); + let t2 = tokio::time::sleep(Duration::from_secs(3)).fuse(); + + pin_mut!(t1, t2); + + select! { + res = t1 => panic!("task should not have stopped: {:?}", res), + _ = t2 => {}, + } + }); + assert_eq!(drop_tester, 4); + } drop_tester.wait_on_drop(); } diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 07259263c5e4d..5fd8e4669c33d 100644 --- a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -261,8 +261,6 @@ where let signal = tokio::signal::ctrl_c(); futures::pin_mut!(signal); futures::future::select(task, signal).await; - // we don't really care whichever comes first. - task_manager.clean_shutdown().await } } } From 197b8fd3675104a45ad547b2a424a4c70734cba8 Mon Sep 17 00:00:00 2001 From: Doordashcon <90750465+Doordashcon@users.noreply.github.com> Date: Sat, 20 Nov 2021 09:00:43 +0100 Subject: [PATCH 123/162] tuple to struct event variants (#10257) * AFNPEV recovery * AFNPEV session * cargo +nightly fmt && cargo fmt * removed redundant comments * update * update & cargo +nightly fmt * update & cargo +nightly fmt * update recovery/src/lib.rs * update session/src/lib.rs --- frame/recovery/src/lib.rs | 54 +++++++++++++++++++++++++-------------- frame/session/src/lib.rs | 6 ++--- 2 files changed, 38 insertions(+), 22 deletions(-) diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 522d7008017f3..24090e9000fa4 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -262,22 +262,22 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// A recovery process has been set up for an \[account\]. - RecoveryCreated(T::AccountId), + /// A recovery process has been set up for an account. + RecoveryCreated { account: T::AccountId }, /// A recovery process has been initiated for lost account by rescuer account. - /// \[lost, rescuer\] - RecoveryInitiated(T::AccountId, T::AccountId), + RecoveryInitiated { lost_account: T::AccountId, rescuer_account: T::AccountId }, /// A recovery process for lost account by rescuer account has been vouched for by sender. 
- /// \[lost, rescuer, sender\] - RecoveryVouched(T::AccountId, T::AccountId, T::AccountId), + RecoveryVouched { + lost_account: T::AccountId, + rescuer_account: T::AccountId, + sender: T::AccountId, + }, /// A recovery process for lost account by rescuer account has been closed. - /// \[lost, rescuer\] - RecoveryClosed(T::AccountId, T::AccountId), + RecoveryClosed { lost_account: T::AccountId, rescuer_account: T::AccountId }, /// Lost account has been successfully recovered by rescuer account. - /// \[lost, rescuer\] - AccountRecovered(T::AccountId, T::AccountId), - /// A recovery process has been removed for an \[account\]. - RecoveryRemoved(T::AccountId), + AccountRecovered { lost_account: T::AccountId, rescuer_account: T::AccountId }, + /// A recovery process has been removed for an account. + RecoveryRemoved { lost_account: T::AccountId }, } #[pallet::error] @@ -409,7 +409,10 @@ pub mod pallet { ensure_root(origin)?; // Create the recovery storage item. >::insert(&rescuer, &lost); - Self::deposit_event(Event::::AccountRecovered(lost, rescuer)); + Self::deposit_event(Event::::AccountRecovered { + lost_account: lost, + rescuer_account: rescuer, + }); Ok(()) } @@ -472,7 +475,7 @@ pub mod pallet { // Create the recovery configuration storage item >::insert(&who, recovery_config); - Self::deposit_event(Event::::RecoveryCreated(who)); + Self::deposit_event(Event::::RecoveryCreated { account: who }); Ok(()) } @@ -519,7 +522,10 @@ pub mod pallet { }; // Create the active recovery storage item >::insert(&account, &who, recovery_status); - Self::deposit_event(Event::::RecoveryInitiated(account, who)); + Self::deposit_event(Event::::RecoveryInitiated { + lost_account: account, + rescuer_account: who, + }); Ok(()) } @@ -568,7 +574,11 @@ pub mod pallet { } // Update storage with the latest details >::insert(&lost, &rescuer, active_recovery); - Self::deposit_event(Event::::RecoveryVouched(lost, rescuer, who)); + Self::deposit_event(Event::::RecoveryVouched { + lost_account: lost, + rescuer_account: rescuer, + sender: who, + }); Ok(()) } @@ -617,7 +627,10 @@ pub mod pallet { frame_system::Pallet::::inc_consumers(&who).map_err(|_| Error::::BadState)?; // Create the recovery storage item Proxy::::insert(&who, &account); - Self::deposit_event(Event::::AccountRecovered(account, who)); + Self::deposit_event(Event::::AccountRecovered { + lost_account: account, + rescuer_account: who, + }); Ok(()) } @@ -656,7 +669,10 @@ pub mod pallet { BalanceStatus::Free, ); debug_assert!(res.is_ok()); - Self::deposit_event(Event::::RecoveryClosed(who, rescuer)); + Self::deposit_event(Event::::RecoveryClosed { + lost_account: who, + rescuer_account: rescuer, + }); Ok(()) } @@ -692,7 +708,7 @@ pub mod pallet { // Unreserve the initial deposit for the recovery configuration. T::Currency::unreserve(&who, recovery_config.deposit); - Self::deposit_event(Event::::RecoveryRemoved(who)); + Self::deposit_event(Event::::RecoveryRemoved { lost_account: who }); Ok(()) } diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 2fd34365705bb..f56d282c0f111 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -532,9 +532,9 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// New session has happened. Note that the argument is the \[session_index\], not the + /// New session has happened. Note that the argument is the session index, not the /// block number as the type might suggest. 
- NewSession(SessionIndex), + NewSession { session_index: SessionIndex }, } /// Old name generated by `decl_event`. @@ -703,7 +703,7 @@ impl Pallet { >::put(next_changed); // Record that this happened. - Self::deposit_event(Event::NewSession(session_index)); + Self::deposit_event(Event::NewSession { session_index }); // Tell everyone about the new session keys. T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); From 664ac530640823f55bcb8b7a2a4cc215fd5b11d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 20 Nov 2021 23:08:49 +0000 Subject: [PATCH 124/162] Bump derive_more from 0.99.11 to 0.99.16 (#10282) Bumps [derive_more](https://github.com/JelteF/derive_more) from 0.99.11 to 0.99.16. - [Release notes](https://github.com/JelteF/derive_more/releases) - [Changelog](https://github.com/JelteF/derive_more/blob/master/CHANGELOG.md) - [Commits](https://github.com/JelteF/derive_more/compare/v0.99.11...v0.99.16) --- updated-dependencies: - dependency-name: derive_more dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 12 ++++++++++-- bin/node/bench/Cargo.toml | 2 +- client/authority-discovery/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/executor/common/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/transaction-pool/api/Cargo.toml | 2 +- primitives/keystore/Cargo.toml | 2 +- test-utils/runtime/transaction-pool/Cargo.toml | 2 +- 15 files changed, 24 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9798d4c42054..56eb2a5543c8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -993,6 +993,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.1" @@ -1476,12 +1482,14 @@ dependencies = [ [[package]] name = "derive_more" -version = "0.99.11" +version = "0.99.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.3.3", "syn", ] diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 66a14a123ee56..037a233a4ca0d 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -19,7 +19,7 @@ sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-m serde = "1.0.126" serde_json = "1.0.68" structopt = "0.3" -derive_more = "0.99.2" +derive_more = "0.99.16" kvdb = "0.10.0" kvdb-rocksdb = "0.14.0" sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index b1d9d4ebd3935..fff1ed08a8c11 100644 --- 
a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -19,7 +19,7 @@ prost-build = "0.9" [dependencies] async-trait = "0.1" codec = { package = "parity-scale-codec", default-features = false, version = "2.0.0" } -derive_more = "0.99.2" +derive_more = "0.99.16" futures = "0.3.9" futures-timer = "3.0.1" ip_network = "0.4.0" diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 152c40f78f9d4..f4f12a0a601c9 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } -derive_more = "0.99.2" +derive_more = "0.99.16" futures = "0.3.9" sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 7945ecc4ec793..5020a4a564782 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -49,7 +49,7 @@ log = "0.4.8" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } rand = "0.7.2" merlin = "2.0" -derive_more = "0.99.2" +derive_more = "0.99.16" retain_mut = "0.1.4" async-trait = "0.1.50" diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index bc57092d34001..3a7cf86a800c1 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -24,7 +24,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../../primitives/blockcha sp-runtime = { version = "4.0.0-dev", path = "../../../../primitives/runtime" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../../epochs" } futures = "0.3.16" -derive_more = "0.99.2" +derive_more = "0.99.16" sp-api = { version = "4.0.0-dev", path = "../../../../primitives/api" } sp-consensus = { version = "0.10.0-dev", path = "../../../../primitives/consensus/common" } sp-core = { version = "4.0.0-dev", path = "../../../../primitives/core" } diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 90edc15863cdb..48ba910655441 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -derive_more = "0.99.2" +derive_more = "0.99.16" futures = "0.3.9" jsonrpc-core = "18.0.0" jsonrpc-core-client = "18.0.0" diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 841631fce7cc9..25e39641ae43e 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -28,6 +28,6 @@ log = "0.4.8" futures = "0.3.16" futures-timer = "3.0.1" parking_lot = "0.11.1" -derive_more = "0.99.2" +derive_more = "0.99.16" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.10.0-dev"} async-trait = "0.1.50" diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 1bb057fabad84..3d2cec9ac60c9 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -derive_more = "0.99.2" +derive_more = "0.99.16" 
pwasm-utils = "0.18.2" codec = { package = "parity-scale-codec", version = "2.0.0" } wasmi = "0.9.1" diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index ec4bac715ad40..1dd8c2518ab72 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -derive_more = "0.99.2" +derive_more = "0.99.16" dyn-clone = "1.0" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } futures = "0.3.9" diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index bd158091e747c..9e2928d41e898 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.50" -derive_more = "0.99.2" +derive_more = "0.99.16" sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index eb91dd145e549..13eb408b0bb58 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -25,7 +25,7 @@ bytes = "1" codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive", ] } -derive_more = "0.99.2" +derive_more = "0.99.16" either = "1.5.3" fnv = "1.0.6" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index 1ab1ef5bb4a16..f44ec588e9543 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -15,5 +15,5 @@ serde = { version = "1.0.126", features = ["derive"] } thiserror = { version = "1.0.30" } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } -derive_more = { version = "0.99.11" } +derive_more = { version = "0.99.16" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index e16ff4676c3b1..ab026ccedd512 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.50" -derive_more = "0.99.2" +derive_more = "0.99.16" codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } futures = { version = "0.3.1" } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 402caa93d10d8..ee1ac4814db5f 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -20,4 +20,4 @@ sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool", features = ["test-helpers"] } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } futures = "0.3.16" -derive_more = "0.99.2" +derive_more = "0.99.16" From 541a72f9eb41678e4601593735655a5cf794bd4a Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Sun, 21 Nov 2021 14:42:21 +0100 Subject: [PATCH 125/162] use proper intra doc link (#10271) * use 
proper intra doc link * use proper intra doc link * get system path name from path module name function for the docs * used format macro in formatting doc string for better output * helper function to get intra doc string * helper function to get intra doc string * use helper function on expand_origin_pallet_conversions * remove duplicates * Update frame/support/procedural/src/construct_runtime/expand/origin.rs Co-authored-by: Guillaume Thiolliere * remove leading white space Co-authored-by: Damilare Co-authored-by: Guillaume Thiolliere --- .../src/construct_runtime/expand/origin.rs | 53 +++++++++++++++---- 1 file changed, 44 insertions(+), 9 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index eb0212c3efee3..5c2b4c9f93b0b 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -68,14 +68,39 @@ pub fn expand_outer_origin( } let system_path = &system_pallet.path; + let system_index = system_pallet.index; + let system_path_name = system_path.module_name(); + + let doc_string = get_intra_doc_string( + "Origin is always created with the base filter configured in", + &system_path_name, + ); + + let doc_string_none_origin = + get_intra_doc_string("Create with system none origin and", &system_path_name); + + let doc_string_root_origin = + get_intra_doc_string("Create with system root origin and", &system_path_name); + + let doc_string_signed_origin = + get_intra_doc_string("Create with system signed origin and", &system_path_name); + + let doc_string_runtime_origin = + get_intra_doc_string("Convert to runtime origin, using as filter:", &system_path_name); + + let doc_string_runtime_origin_with_caller = get_intra_doc_string( + "Convert to runtime origin with caller being system signed or none and use filter", + &system_path_name, + ); + Ok(quote! { #( #query_origin_part_macros )* - /// The runtime origin type represanting the origin of a call. + /// The runtime origin type representing the origin of a call. /// - /// Origin is always created with the base filter configured in `frame_system::Config::BaseCallFilter`. + #[doc = #doc_string] #[derive(Clone)] pub struct Origin { caller: OriginCaller, @@ -182,15 +207,18 @@ pub fn expand_outer_origin( // For backwards compatibility and ease of accessing these functions. #[allow(dead_code)] impl Origin { - /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + + #[doc = #doc_string_none_origin] pub fn none() -> Self { ::none() } - /// Create with system root origin and `frame-system::Config::BaseCallFilter`. + + #[doc = #doc_string_root_origin] pub fn root() -> Self { ::root() } - /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + + #[doc = #doc_string_signed_origin] pub fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { ::signed(by) } @@ -216,7 +244,8 @@ pub fn expand_outer_origin( } impl From<#system_path::Origin<#runtime>> for Origin { - /// Convert to runtime origin, using as filter: `frame-system::Config::BaseCallFilter`. + + #[doc = #doc_string_runtime_origin] fn from(x: #system_path::Origin<#runtime>) -> Self { let o: OriginCaller = x.into(); o.into() @@ -247,8 +276,7 @@ pub fn expand_outer_origin( } } impl From::AccountId>> for Origin { - /// Convert to runtime origin with caller being system signed or none and use filter - /// `frame-system::Config::BaseCallFilter`. 
+ #[doc = #doc_string_runtime_origin_with_caller] fn from(x: Option<<#runtime as #system_path::Config>::AccountId>) -> Self { <#system_path::Origin<#runtime>>::from(x).into() } @@ -303,6 +331,8 @@ fn expand_origin_pallet_conversions( None => quote!(#path::Origin), }; + let doc_string = get_intra_doc_string(" Convert to runtime origin using", &path.module_name()); + quote! { impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { @@ -311,7 +341,7 @@ fn expand_origin_pallet_conversions( } impl From<#pallet_origin> for Origin { - /// Convert to runtime origin using `frame-system::Config::BaseCallFilter`. + #[doc = #doc_string] fn from(x: #pallet_origin) -> Self { let x: OriginCaller = x.into(); x.into() @@ -343,3 +373,8 @@ fn expand_origin_pallet_conversions( } } } + +// Get the actual documentation using the doc information and system path name +fn get_intra_doc_string(doc_info: &str, system_path_name: &String) -> String { + format!(" {} [`{}::Config::BaseCallFilter`].", doc_info, system_path_name) +} From 35fcfd66d380c8336030810030d8f09171b51734 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Mon, 22 Nov 2021 19:48:53 +0900 Subject: [PATCH 126/162] Fix some doc link (#10329) * fix some doc link * fmt --- client/consensus/pow/src/lib.rs | 4 ++-- client/executor/wasmtime/src/runtime.rs | 8 ++++---- client/telemetry/src/lib.rs | 2 +- frame/bags-list/src/lib.rs | 11 ++++++----- frame/bags-list/src/list/mod.rs | 6 +++--- frame/beefy-mmr/primitives/src/lib.rs | 5 +++-- frame/beefy-mmr/src/lib.rs | 2 +- frame/benchmarking/src/lib.rs | 6 +++--- frame/contracts/src/lib.rs | 2 +- frame/election-provider-multi-phase/src/lib.rs | 8 ++++---- frame/election-provider-support/src/lib.rs | 6 +++--- frame/election-provider-support/src/onchain.rs | 2 +- frame/executive/src/lib.rs | 2 +- frame/staking/src/pallet/mod.rs | 5 +++-- frame/support/procedural/src/construct_runtime/mod.rs | 1 + frame/support/src/lib.rs | 2 +- frame/support/src/storage/bounded_vec.rs | 2 +- frame/support/src/storage/weak_bounded_vec.rs | 2 +- frame/support/src/traits/misc.rs | 2 +- frame/system/src/lib.rs | 3 ++- primitives/beefy/src/commitment.rs | 9 +++++---- primitives/npos-elections/solution-type/src/lib.rs | 2 +- primitives/npos-elections/src/assignments.rs | 2 +- primitives/sandbox/src/lib.rs | 2 +- utils/frame/generate-bags/src/lib.rs | 2 +- 25 files changed, 52 insertions(+), 46 deletions(-) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 3ab0b977255ee..6ccdf53c0f047 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -27,9 +27,9 @@ //! started via the [`start_mining_worker`] function. It returns a worker //! handle together with a future. The future must be pulled. Through //! the worker handle, you can pull the metadata needed to start the -//! mining process via [`MiningWorker::metadata`], and then do the actual +//! mining process via [`MiningHandle::metadata`], and then do the actual //! mining on a standalone thread. Finally, when a seal is found, call -//! [`MiningWorker::submit`] to build the block. +//! [`MiningHandle::submit`] to build the block. //! //! The auxiliary storage for PoW engine only stores the total difficulty. //! 
For other storage requirements for particular PoW algorithm (such as diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 4d107862173b0..7808ac7ce547d 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -459,11 +459,11 @@ pub struct Config { /// The total amount of memory in bytes an instance can request. /// /// If specified, the runtime will be able to allocate only that much of wasm memory. - /// This is the total number and therefore the [`heap_pages`] is accounted for. + /// This is the total number and therefore the [`Config::heap_pages`] is accounted for. /// - /// That means that the initial number of pages of a linear memory plus the [`heap_pages`] - /// multiplied by the wasm page size (64KiB) should be less than or equal to `max_memory_size`, - /// otherwise the instance won't be created. + /// That means that the initial number of pages of a linear memory plus the + /// [`Config::heap_pages`] multiplied by the wasm page size (64KiB) should be less than or + /// equal to `max_memory_size`, otherwise the instance won't be created. /// /// Moreover, `memory.grow` will fail (return -1) if the sum of sizes of currently mounted /// and additional pages exceeds `max_memory_size`. diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 9fb86f57d8392..2e50bf5884fa7 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -440,7 +440,7 @@ enum Register { /// Report a telemetry. /// -/// Translates to [`tracing::info`], but contains an additional verbosity parameter which the log +/// Translates to `tracing::info`, but contains an additional verbosity parameter which the log /// record is tagged with. Additionally the verbosity parameter is added to the record as a /// key-value pair. /// diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 8be1afbe29bbe..2cff68b54c9c7 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -19,11 +19,11 @@ //! //! A semi-sorted list, where items hold an `AccountId` based on some `VoteWeight`. The `AccountId` //! (`id` for short) might be synonym to a `voter` or `nominator` in some context, and `VoteWeight` -//! signifies the chance of each id being included in the final [`VoteWeightProvider::iter`]. +//! signifies the chance of each id being included in the final [`SortedListProvider::iter`]. //! -//! It implements [`sp_election_provider_support::SortedListProvider`] to provide a semi-sorted list -//! of accounts to another pallet. It needs some other pallet to give it some information about the -//! weights of accounts via [`sp_election_provider_support::VoteWeightProvider`]. +//! It implements [`frame_election_provider_support::SortedListProvider`] to provide a semi-sorted +//! list of accounts to another pallet. It needs some other pallet to give it some information about +//! the weights of accounts via [`frame_election_provider_support::VoteWeightProvider`]. //! //! This pallet is not configurable at genesis. Whoever uses it should call appropriate functions of //! the `SortedListProvider` (e.g. `on_insert`, or `regenerate`) at their genesis. @@ -38,7 +38,8 @@ //! //! # Details //! -//! - items are kept in bags, which are delineated by their range of weight (See [`BagThresholds`]). +//! - items are kept in bags, which are delineated by their range of weight (See +//! [`Config::BagThresholds`]). //! 
- for iteration, bags are chained together from highest to lowest and elements within the bag //! are iterated from head to tail. //! - items within a bag are iterated in order of insertion. Thus removing an item and re-inserting diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index df966eea80cee..b381b36dc9ee2 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -17,8 +17,8 @@ //! Implementation of a "bags list": a semi-sorted list where ordering granularity is dictated by //! configurable thresholds that delineate the boundaries of bags. It uses a pattern of composite -//! data structures, where multiple storage items are masked by one outer API. See [`ListNodes`], -//! [`ListBags`] for more information. +//! data structures, where multiple storage items are masked by one outer API. See +//! [`crate::ListNodes`], [`crate::ListBags`] for more information. //! //! The outer API of this module is the [`List`] struct. It wraps all acceptable operations on top //! of the aggregate linked list. All operations with the bags list should happen through this @@ -460,7 +460,7 @@ impl List { } } -/// A Bag is a doubly-linked list of ids, where each id is mapped to a [`ListNode`]. +/// A Bag is a doubly-linked list of ids, where each id is mapped to a [`Node`]. /// /// Note that we maintain both head and tail pointers. While it would be possible to get away with /// maintaining only a head pointer and cons-ing elements onto the front of the list, it's more diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 4d4d4e8721ac8..ee1484607563f 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -44,7 +44,8 @@ pub type Hash = [u8; 32]; /// Generic hasher trait. /// /// Implement the function to support custom way of hashing data. -/// The implementation must return a [Hash] type, so only 32-byte output hashes are supported. +/// The implementation must return a [Hash](type@Hash) type, so only 32-byte output hashes are +/// supported. pub trait Hasher { /// Hash given arbitrary-length piece of data. fn hash(data: &[u8]) -> Hash; @@ -173,7 +174,7 @@ impl Visitor for () { /// /// # Panic /// -/// The function will panic if given [`leaf_index`] is greater than the number of leaves. +/// The function will panic if given `leaf_index` is greater than the number of leaves. pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where H: Hasher, diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 001831639b169..b846aa4a7dd6b 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -149,7 +149,7 @@ pub mod pallet { /// Details of next BEEFY authority set. /// - /// This storage entry is used as cache for calls to [`update_beefy_next_authority_set`]. + /// This storage entry is used as cache for calls to `update_beefy_next_authority_set`. #[pallet::storage] #[pallet::getter(fn beefy_next_authorities)] pub type BeefyNextAuthorities = diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 088dbeb0bb78d..56545914dcba3 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1266,21 +1266,21 @@ macro_rules! 
impl_benchmark_test { /// fn bench_accumulate_dummy() { /// new_test_ext().execute_with(|| { /// assert_ok!(test_benchmark_accumulate_dummy::()); -/// } +/// }) /// } /// /// #[test] /// fn bench_set_dummy() { /// new_test_ext().execute_with(|| { /// assert_ok!(test_benchmark_set_dummy::()); -/// } +/// }) /// } /// /// #[test] /// fn bench_sort_vector() { /// new_test_ext().execute_with(|| { /// assert_ok!(test_benchmark_sort_vector::()); -/// } +/// }) /// } /// } /// ``` diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 62b74b9b7b954..b2e221dde6ad9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -202,7 +202,7 @@ pub mod pallet { type Schedule: Get>; /// The deposit that must be placed into the contract's account to instantiate it. - /// This is in **addition** to the [`pallet_balances::Pallet::ExistenialDeposit`]. + /// This is in **addition** to the [`Currency::minimum_balance`]. /// The minimum balance for a contract's account can be queried using /// [`Pallet::subsistence_threshold`]. #[pallet::constant] diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index cdf5a2098d6bb..ee4c9ae45d42b 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -183,8 +183,8 @@ //! are helpful for logging and are thus nested as: //! - [`ElectionError::Miner`]: wraps a [`unsigned::MinerError`]. //! - [`ElectionError::Feasibility`]: wraps a [`FeasibilityError`]. -//! - [`ElectionError::OnChainFallback`]: wraps a -//! [`frame_election_provider_support::onchain::Error`]. +//! - [`ElectionError::Fallback`]: wraps a fallback error. +//! - [`ElectionError::DataProvider`]: wraps a static str. //! //! Note that there could be an overlap between these sub-errors. For example, A //! `SnapshotUnavailable` can happen in both miner and feasibility check phase. @@ -1244,14 +1244,14 @@ impl Pallet { } } - /// Logic for [`::on_initialize`] when signed phase is being opened. + /// Logic for `::on_initialize` when signed phase is being opened. pub fn on_initialize_open_signed() { log!(info, "Starting signed phase round {}.", Self::round()); >::put(Phase::Signed); Self::deposit_event(Event::SignedPhaseStarted { round: Self::round() }); } - /// Logic for [`>::on_initialize`] when unsigned phase is being opened. + /// Logic for `>::on_initialize` when unsigned phase is being opened. pub fn on_initialize_open_unsigned(enabled: bool, now: T::BlockNumber) { let round = Self::round(); log!(info, "Starting unsigned phase round {} enabled {}.", round, enabled); diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index cb36e025c3bee..472584ed2506b 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -384,7 +384,7 @@ pub trait NposSolver { ) -> Result, Self::Error>; } -/// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`super::NposSolver`]. See the +/// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`NposSolver`]. See the /// documentation of [`sp_npos_elections::seq_phragmen`] for more info. pub struct SequentialPhragmen( sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, @@ -408,8 +408,8 @@ impl< } } -/// A wrapper for [`sp_npos_elections::phragmms`] that implements [`NposSolver`]. See the -/// documentation of [`sp_npos_elections::phragmms`] for more info. 
+/// A wrapper for [`sp_npos_elections::phragmms()`] that implements [`NposSolver`]. See the +/// documentation of [`sp_npos_elections::phragmms()`] for more info. pub struct PhragMMS( sp_std::marker::PhantomData<(AccountId, Accuracy, Balancing)>, ); diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index fb1ccfdfe2566..6379adae4206b 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -57,7 +57,7 @@ pub struct OnChainSequentialPhragmen(PhantomData); /// /// WARNING: the user of this pallet must ensure that the `Accuracy` type will work nicely with the /// normalization operation done inside `seq_phragmen`. See -/// [`sp_npos_elections::assignment::try_normalize`] for more info. +/// [`sp_npos_elections::Assignment::try_normalize`] for more info. pub trait Config: frame_system::Config { /// The accuracy used to compute the election: type Accuracy: PerThing128; diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index e77c811a35e2d..dd0a9abf8687b 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -219,7 +219,7 @@ where weight } - /// Execute given block, but don't do any of the [`final_checks`]. + /// Execute given block, but don't do any of the `final_checks`. /// /// Should only be used for testing. #[cfg(feature = "try-runtime")] diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index ec7e86af958f1..542b79b792dc7 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -69,7 +69,8 @@ pub mod pallet { /// Convert a balance into a number used for election calculation. This must fit into a /// `u64` but is allowed to be sensibly lossy. The `u64` is used to communicate with the - /// [`sp_npos_elections`] crate which accepts u64 numbers and does operations in 128. + /// [`frame_election_provider_support`] crate which accepts u64 numbers and does operations + /// in 128. /// Consequently, the backward convert is used convert the u128s from sp-elections back to a /// [`BalanceOf`]. type CurrencyToVote: CurrencyToVote>; @@ -146,7 +147,7 @@ pub mod pallet { type OffendingValidatorsThreshold: Get; /// Something that can provide a sorted list of voters in a somewhat sorted way. The - /// original use case for this was designed with [`pallet_bags_list::Pallet`] in mind. If + /// original use case for this was designed with `pallet_bags_list::Pallet` in mind. If /// the bags-list is not desired, [`impls::UseNominatorsMap`] is likely the desired option. type SortedListProvider: SortedListProvider; diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index f54fa79ce609b..a5da775b9c9ea 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -84,6 +84,7 @@ //! }] //! pattern = [{ System: frame_system }] //! tokens = [{ ::{Pallet, Call} }] +//! } //! }] //! pattern = [{ Balances: pallet_balances }] //! tokens = [{ ::{Pallet, Call} }] diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index af9192f6ea836..2adcd8ce4efcf 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1749,7 +1749,7 @@ pub mod pallet_prelude { /// ``` /// /// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded. 
-/// When implementating the storage info (when #[pallet::generate_storage_info]` is specified
+/// When implementing the storage info (when `#[pallet::generate_storage_info]` is specified
 /// on the pallet struct placeholder), the size of the storage will be declared as unbounded.
 /// This can be useful for storage which can never go into PoV (Proof of Validity).
 ///
diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs
index 3b5e7bda1651c..b9ece89bff284 100644
--- a/frame/support/src/storage/bounded_vec.rs
+++ b/frame/support/src/storage/bounded_vec.rs
@@ -130,7 +130,7 @@ impl<T, S: Get<u32>> BoundedVec<T, S> {
 		self.0.retain(f)
 	}

-	/// Exactly the same semantics as [`Vec::get_mut`].
+	/// Exactly the same semantics as [`slice::get_mut`].
 	pub fn get_mut<I: SliceIndex<[T]>>(
 		&mut self,
 		index: I,
diff --git a/frame/support/src/storage/weak_bounded_vec.rs b/frame/support/src/storage/weak_bounded_vec.rs
index 823c50c55d0b9..566889f5f192c 100644
--- a/frame/support/src/storage/weak_bounded_vec.rs
+++ b/frame/support/src/storage/weak_bounded_vec.rs
@@ -90,7 +90,7 @@ impl<T, S: Get<u32>> WeakBoundedVec<T, S> {
 		self.0.retain(f)
 	}

-	/// Exactly the same semantics as [`Vec::get_mut`].
+	/// Exactly the same semantics as [`slice::get_mut`].
 	pub fn get_mut<I: SliceIndex<[T]>>(
 		&mut self,
 		index: I,
diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs
index 0a3fb045d6c1d..153c3804bd599 100644
--- a/frame/support/src/traits/misc.rs
+++ b/frame/support/src/traits/misc.rs
@@ -301,7 +301,7 @@ pub trait PrivilegeCmp<Origin> {
 /// Implementation of [`PrivilegeCmp`] that only checks for equal origins.
 ///
-/// This means it will either return [`Origin::Equal`] or `None`.
+/// This means it will either return [`Ordering::Equal`] or `None`.
 pub struct EqualPrivilegeOnly;
 impl<Origin: PartialEq> PrivilegeCmp<Origin> for EqualPrivilegeOnly {
 	fn cmp_privilege(left: &Origin, right: &Origin) -> Option<Ordering> {
diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs
index d5b930fa165e6..12361ed859d0e 100644
--- a/frame/system/src/lib.rs
+++ b/frame/system/src/lib.rs
@@ -301,7 +301,8 @@ pub mod pallet {
 		/// What to do if the runtime wants to change the code to something new.
 		///
 		/// The default (`()`) implementation is responsible for setting the correct storage
-		/// entry and emitting corresponding event and log item. (see [`update_code_in_storage`]).
+		/// entry and emitting corresponding event and log item. (see
+		/// [`Pallet::update_code_in_storage`]).
 		/// It's unlikely that this needs to be customized, unless you are writing a parachain using
 		/// `Cumulus`, where the actual code change is deferred.
 		type OnSetCode: SetCode<Self>;
diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs
index 7aab93bbcb973..d9e4de6e19bb7 100644
--- a/primitives/beefy/src/commitment.rs
+++ b/primitives/beefy/src/commitment.rs
@@ -21,7 +21,8 @@ use crate::{crypto::Signature, ValidatorSetId};

 /// A commitment signed by GRANDPA validators as part of BEEFY protocol.
 ///
-/// The commitment contains a [payload] extracted from the finalized block at height [block_number].
+/// The commitment contains a [payload](Commitment::payload) extracted from the finalized block at
+/// height [block_number](Commitment::block_number).
 /// GRANDPA validators collect signatures on commitments and a stream of such signed commitments
 /// (see [SignedCommitment]) forms the BEEFY protocol.
 #[derive(Clone, Debug, PartialEq, Eq, codec::Encode, codec::Decode)]
@@ -33,7 +34,7 @@ pub struct Commitment<TBlockNumber, TPayload> {
 	/// validator set.
The protocol does not enforce any particular format of this data, /// nor how often it should be present in commitments, however the light client has to be /// provided with full validator set whenever it performs the transition (i.e. importing first - /// block with [validator_set_id] incremented). + /// block with [validator_set_id](Commitment::validator_set_id) incremented). pub payload: TPayload, /// Finalized block number this commitment is for. @@ -51,8 +52,8 @@ pub struct Commitment { /// /// Validator set is changing once per epoch. The Light Client must be provided by details /// about the validator set whenever it's importing first commitment with a new - /// `validator_set_id`. Validator set data MUST be verifiable, for instance using [payload] - /// information. + /// `validator_set_id`. Validator set data MUST be verifiable, for instance using + /// [payload](Commitment::payload) information. pub validator_set_id: ValidatorSetId, } diff --git a/primitives/npos-elections/solution-type/src/lib.rs b/primitives/npos-elections/solution-type/src/lib.rs index 9b0ec56fc74de..967ead4400c3e 100644 --- a/primitives/npos-elections/solution-type/src/lib.rs +++ b/primitives/npos-elections/solution-type/src/lib.rs @@ -88,7 +88,7 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// ``` /// /// The given struct provides function to convert from/to `Assignment` as part of -/// [`sp_npos_elections::Solution`] trait: +/// `sp_npos_elections::Solution` trait: /// /// - `fn from_assignment<..>(..)` /// - `fn into_assignment<..>(..)` diff --git a/primitives/npos-elections/src/assignments.rs b/primitives/npos-elections/src/assignments.rs index bdd1e2cd281bb..330f7bd7e843e 100644 --- a/primitives/npos-elections/src/assignments.rs +++ b/primitives/npos-elections/src/assignments.rs @@ -201,7 +201,7 @@ impl IndexAssignment = IndexAssignment< ::VoterIndex, ::TargetIndex, diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index f1a24732b7a0a..79c56e70e2f4b 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -89,7 +89,7 @@ pub type HostFuncType = fn(&mut T, &[Value]) -> Result f64 { /// The last element is always `VoteWeight::MAX`. /// /// All other elements are computed from the previous according to the formula -/// `threshold[k + 1] = (threshold[k] * ratio).max(threshold[k] + 1); +/// `threshold[k + 1] = (threshold[k] * ratio).max(threshold[k] + 1);` pub fn thresholds( existential_weight: VoteWeight, constant_ratio: f64, From 68c0952dcf05312c1229e92ff86fa8191c8610a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Nov 2021 11:12:59 +0000 Subject: [PATCH 127/162] Bump platforms from 1.1.0 to 2.0.0 (#10331) Bumps [platforms](https://github.com/rustsec/rustsec) from 1.1.0 to 2.0.0. - [Release notes](https://github.com/rustsec/rustsec/releases) - [Commits](https://github.com/rustsec/rustsec/commits/platforms/v2.0.0) --- updated-dependencies: - dependency-name: platforms dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- bin/node/cli/Cargo.toml | 2 +- utils/build-script-utils/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56eb2a5543c8e..2c10e9fe0b95e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6643,9 +6643,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "platforms" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" +checksum = "e8d0eef3571242013a0d5dc84861c3ae4a652e56e12adf8bdc26ff5f8cb34c94" [[package]] name = "plotters" diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 5a9e76bccf63b..1bb3671d42bae 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -126,7 +126,7 @@ assert_cmd = "2.0.2" nix = "0.23" serde_json = "1.0" regex = "1" -platforms = "1.1" +platforms = "2.0" async-std = { version = "1.10.0", features = ["attributes"] } soketto = "0.4.2" criterion = { version = "0.3.5", features = [ "async_tokio" ] } diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 93611c7b5b017..7150b4fa5adcb 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -13,4 +13,4 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -platforms = "1.1" +platforms = "2.0" From c0da7c3d47def8600c9d3ef55617790d81b7d8bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Nov 2021 12:57:50 +0000 Subject: [PATCH 128/162] Bump serde_json from 1.0.68 to 1.0.71 (#10321) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.68 to 1.0.71. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.68...v1.0.71) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- bin/node/bench/Cargo.toml | 2 +- client/chain-spec/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/sync-state-rpc/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- frame/merkle-mountain-range/rpc/Cargo.toml | 2 +- frame/transaction-payment/Cargo.toml | 2 +- frame/transaction-payment/asset-tx-payment/Cargo.toml | 2 +- primitives/rpc/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/serializer/Cargo.toml | 2 +- test-utils/client/Cargo.toml | 2 +- 22 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2c10e9fe0b95e..3fd2ba8085a9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8956,9 +8956,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" dependencies = [ "itoa", "ryu", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 037a233a4ca0d..0beeb4e339e43 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -17,7 +17,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } serde = "1.0.126" -serde_json = "1.0.68" +serde_json = "1.0.71" structopt = "0.3" derive_more = "0.99.16" kvdb = "0.10.0" diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index d7557e2062ac4..976da45859117 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -18,7 +18,7 @@ impl-trait-for-tuples = "0.2.1" sc-network = { version = "0.10.0-dev", path = "../network" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } serde = { version = "1.0.126", features = ["derive"] } -serde_json = "1.0.68" +serde_json = "1.0.71" sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } codec = { package = "parity-scale-codec", version = "2.0.0" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 2855e63cdc6a0..8667a5e876c14 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -23,7 +23,7 @@ parity-scale-codec = "2.3.1" hex = "0.4.2" rand = "0.7.3" tiny-bip39 = "0.8.2" -serde_json = "1.0.68" +serde_json = "1.0.71" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-panic-handler = { version = "4.0.0-dev", path = "../../primitives/panic-handler" } sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 3a7cf86a800c1..b47c3a711df99 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -33,7 +33,7 @@ sp-keystore = { version = "0.10.0-dev", path = "../../../../primitives/keystore" [dev-dependencies] sc-consensus = { version = 
"0.10.0-dev", path = "../../../consensus/common" } -serde_json = "1.0.68" +serde_json = "1.0.71" sp-keyring = { version = "4.0.0-dev", path = "../../../../primitives/keyring" } sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 1dd8c2518ab72..65a0580b9e953 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -35,7 +35,7 @@ sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } -serde_json = "1.0.68" +serde_json = "1.0.71" sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sc-network = { version = "0.10.0-dev", path = "../network" } diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 9e2928d41e898..9da825faf033a 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -22,7 +22,7 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } hex = "0.4.0" parking_lot = "0.11.1" -serde_json = "1.0.68" +serde_json = "1.0.71" [dev-dependencies] tempfile = "3.1.0" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 13eb408b0bb58..c4622c0c5df0e 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -47,7 +47,7 @@ sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } serde = { version = "1.0.126", features = ["derive"] } -serde_json = "1.0.68" +serde_json = "1.0.71" smallvec = "1.7.0" sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 01b75f1094ff4..fba499bce00e3 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -18,7 +18,7 @@ futures = "0.3.9" libp2p = { version = "0.39.1", default-features = false } sc-utils = { version = "4.0.0-dev", path = "../utils"} log = "0.4.8" -serde_json = "1.0.68" +serde_json = "1.0.71" wasm-timer = "0.2" [dev-dependencies] diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 532a5cf6294ce..a758d29aed2c2 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -28,7 +28,7 @@ sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } sp-runtime = { path = "../../primitives/runtime", version = "4.0.0-dev" } sc-chain-spec = { path = "../chain-spec", version = "4.0.0-dev" } serde = { version = "1.0.126", features = ["derive"] } -serde_json = "1.0.68" +serde_json = "1.0.71" sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-rpc = { version = "4.0.0-dev", path = "../../primitives/rpc" } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 3be2380785064..f6f08ac581d4f 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml 
@@ -18,7 +18,7 @@ jsonrpc-core = "18.0.0" pubsub = { package = "jsonrpc-pubsub", version = "18.0.0" } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev"} -serde_json = "1.0.68" +serde_json = "1.0.71" tokio = "1.13" http = { package = "jsonrpc-http-server", version = "18.0.0" } ipc = { package = "jsonrpc-ipc-server", version = "18.0.0" } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index d8aecfe9fd354..6cf2d699386df 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -23,7 +23,7 @@ log = "0.4.8" sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } rpc = { package = "jsonrpc-core", version = "18.0.0" } sp-version = { version = "4.0.0-dev", path = "../../primitives/version" } -serde_json = "1.0.68" +serde_json = "1.0.71" sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } sp-offchain = { version = "4.0.0-dev", path = "../../primitives/offchain" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index a66a4fc67680e..161863b14710b 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -34,7 +34,7 @@ exit-future = "0.2.0" pin-project = "1.0.8" hash-db = "0.15.2" serde = "1.0.126" -serde_json = "1.0.68" +serde_json = "1.0.71" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index 6d8d95954629c..c609ac42c76e9 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -23,7 +23,7 @@ sc-consensus-babe = { version = "0.10.0-dev", path = "../consensus/babe" } sc-consensus-epochs = { version = "0.10.0-dev", path = "../consensus/epochs" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../finality-grandpa" } sc-rpc-api = { version = "0.10.0-dev", path = "../rpc-api" } -serde_json = "1.0.68" +serde_json = "1.0.71" serde = { version = "1.0.126", features = ["derive"] } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 744a0610a07e3..9165f6f87af68 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -23,6 +23,6 @@ log = "0.4.8" pin-project = "1.0.8" rand = "0.7.2" serde = { version = "1.0.126", features = ["derive"] } -serde_json = "1.0.68" +serde_json = "1.0.71" chrono = "0.4.19" thiserror = "1.0.30" diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 881cbb3c8ebfe..d8f861ad8ec50 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -27,4 +27,4 @@ sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } pallet-mmr-primitives = { version = "4.0.0-dev", path = "../primitives" } [dev-dependencies] -serde_json = "1.0.68" +serde_json = "1.0.71" diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 1d3066e39fbda..3b8b85977db3d 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -29,7 +29,7 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su 
frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } [dev-dependencies] -serde_json = "1.0.68" +serde_json = "1.0.71" pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index a381145d667a1..e4f3b128cfce9 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -30,7 +30,7 @@ serde = { version = "1.0.126", optional = true } [dev-dependencies] smallvec = "1.7.0" -serde_json = "1.0.68" +serde_json = "1.0.71" sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" } diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 15f7aa2b3b896..2d56cb06fbcca 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -18,4 +18,4 @@ sp-core = { version = "4.0.0-dev", path = "../core" } rustc-hash = "1.1.0" [dev-dependencies] -serde_json = "1.0.68" +serde_json = "1.0.71" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 8e466bf4a1e85..7966bb28255b7 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -32,7 +32,7 @@ hash256-std-hasher = { version = "0.15.2", default-features = false } either = { version = "1.5", default-features = false } [dev-dependencies] -serde_json = "1.0.68" +serde_json = "1.0.71" rand = "0.7.2" sp-state-machine = { version = "0.10.0-dev", path = "../state-machine" } sp-api = { version = "4.0.0-dev", path = "../api" } diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 2b2acb8dbc373..359217b9b4d5a 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -15,4 +15,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = "1.0.126" -serde_json = "1.0.68" +serde_json = "1.0.71" diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 1277863c94f73..ef481dd3f202a 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0" } futures = "0.3.16" hex = "0.4" serde = "1.0.126" -serde_json = "1.0.68" +serde_json = "1.0.71" sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } sc-client-db = { version = "0.10.0-dev", features = [ "test-helpers", From 5ed2f8fc99b67befc86fd30dce344e394c65e536 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Nov 2021 19:03:41 +0000 Subject: [PATCH 129/162] Bump paste from 1.0.4 to 1.0.6 (#10333) Bumps [paste](https://github.com/dtolnay/paste) from 1.0.4 to 1.0.6. - [Release notes](https://github.com/dtolnay/paste/releases) - [Commits](https://github.com/dtolnay/paste/compare/1.0.4...1.0.6) --- updated-dependencies: - dependency-name: paste dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- client/executor/runtime-test/Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3fd2ba8085a9a..6eb7efb084867 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1947,7 +1947,7 @@ dependencies = [ "linregress", "log 0.4.14", "parity-scale-codec", - "paste 1.0.4", + "paste 1.0.6", "scale-info", "sp-api", "sp-io", @@ -2044,7 +2044,7 @@ dependencies = [ "once_cell", "parity-scale-codec", "parity-util-mem", - "paste 1.0.4", + "paste 1.0.6", "pretty_assertions", "scale-info", "serde", @@ -6465,9 +6465,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" +checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" [[package]] name = "paste-impl" @@ -8072,7 +8072,7 @@ dependencies = [ "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", - "paste 1.0.4", + "paste 1.0.6", "regex", "sc-executor-common", "sc-executor-wasmi", @@ -8495,7 +8495,7 @@ dependencies = [ name = "sc-runtime-test" version = "2.0.0" dependencies = [ - "paste 1.0.4", + "paste 1.0.6", "sp-core", "sp-io", "sp-runtime", @@ -9082,7 +9082,7 @@ dependencies = [ "approx", "num-complex", "num-traits", - "paste 1.0.4", + "paste 1.0.6", ] [[package]] @@ -9696,7 +9696,7 @@ dependencies = [ "log 0.4.14", "parity-scale-codec", "parity-util-mem", - "paste 1.0.4", + "paste 1.0.6", "rand 0.7.3", "scale-info", "serde", @@ -11604,7 +11604,7 @@ dependencies = [ "libc", "log 0.4.14", "object 0.27.1", - "paste 1.0.4", + "paste 1.0.6", "psm", "rayon", "region", diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 2c82a9705ceeb..ea379a160f80c 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../.. sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } -paste = "1.0.4" +paste = "1.0.6" [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } From 07101053f9ad7b78de32b7a9df3fe0fb304cf1e2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Nov 2021 22:57:44 +0100 Subject: [PATCH 130/162] Bump prost from 0.8.0 to 0.9.0 (#10341) Bumps [prost](https://github.com/tokio-rs/prost) from 0.8.0 to 0.9.0. - [Release notes](https://github.com/tokio-rs/prost/releases) - [Commits](https://github.com/tokio-rs/prost/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: prost dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/authority-discovery/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6eb7efb084867..a45e0accd49b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7632,7 +7632,7 @@ dependencies = [ "libp2p", "log 0.4.14", "parity-scale-codec", - "prost 0.8.0", + "prost 0.9.0", "prost-build 0.9.0", "quickcheck", "rand 0.7.3", @@ -8282,7 +8282,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.8", - "prost 0.8.0", + "prost 0.9.0", "prost-build 0.9.0", "quickcheck", "rand 0.7.3", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index fff1ed08a8c11..b6f9b8450d861 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -26,7 +26,7 @@ ip_network = "0.4.0" libp2p = { version = "0.39.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" } -prost = "0.8" +prost = "0.9" rand = "0.7.2" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network = { version = "0.10.0-dev", path = "../network" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index c4622c0c5df0e..527a18dc7755b 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -41,7 +41,7 @@ log = "0.4.8" parking_lot = "0.11.1" pin-project = "1.0.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -prost = "0.8" +prost = "0.9" rand = "0.7.2" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../api" } From 4de8ee2dee389f4246f3156667fc3d6df7047e4f Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 23 Nov 2021 15:35:19 +0900 Subject: [PATCH 131/162] Fix WASM executor without instance reuse; cleanups and refactoring (#10313) * Fix WASM executor without instance reuse; cleanups and refactoring * Align to review comments * Move the functions for reading/writing memory to `util.rs` * Only `#[ignore]` the test in debug builds * More review comments and minor extra comments --- client/executor/wasmtime/build.rs | 25 ++ client/executor/wasmtime/src/host.rs | 212 ++++++++--------- client/executor/wasmtime/src/imports.rs | 15 +- .../executor/wasmtime/src/instance_wrapper.rs | 217 ++++++------------ client/executor/wasmtime/src/runtime.rs | 206 ++++++++--------- client/executor/wasmtime/src/tests.rs | 36 +++ client/executor/wasmtime/src/util.rs | 59 ++++- 7 files changed, 394 insertions(+), 376 deletions(-) create mode 100644 client/executor/wasmtime/build.rs diff --git a/client/executor/wasmtime/build.rs b/client/executor/wasmtime/build.rs new file mode 100644 index 0000000000000..6ab581c9c2685 --- /dev/null +++ b/client/executor/wasmtime/build.rs @@ -0,0 +1,25 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <https://www.gnu.org/licenses/>. + +use std::env; + +fn main() { + if let Ok(profile) = env::var("PROFILE") { + println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + } +} diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index fcb4c4cae3b8a..39ee9ced80af7 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -19,7 +19,7 @@ //! This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. -use crate::{instance_wrapper::InstanceWrapper, runtime::StoreData}; +use crate::{runtime::StoreData, util}; use codec::{Decode, Encode}; use log::trace; use sc_allocator::FreeingBumpHeapAllocator; @@ -30,101 +30,104 @@ use sc_executor_common::{ }; use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; -use std::{cell::RefCell, rc::Rc}; use wasmtime::{Caller, Func, Val}; +// The sandbox store is inside of an Option<Box<..>> so that we can temporarily borrow it. +struct SandboxStore(Option<Box<sandbox::Store<Func>>>); + +// There are a bunch of `Rc`s within the sandbox store, however we only manipulate +// those within one thread so this should be safe. +unsafe impl Send for SandboxStore {} + /// The state required to construct a HostContext context. The context only lasts for one host /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make /// many different host calls that must share state. pub struct HostState { - /// We need some interior mutability here since the host state is shared between all host - /// function handlers and the wasmtime backend's `impl WasmRuntime`. - /// - /// Furthermore, because of recursive calls (e.g. runtime can create and call a sandboxed - /// instance which in turn can call the runtime back) we have to be very careful with borrowing - /// those. - /// - /// Basically, most of the interactions should do temporary borrow immediately releasing the - /// borrow after performing necessary queries/changes. - sandbox_store: Rc<RefCell<sandbox::Store<Func>>>, - allocator: RefCell<FreeingBumpHeapAllocator>, - instance: Rc<InstanceWrapper>, + sandbox_store: SandboxStore, + allocator: FreeingBumpHeapAllocator, } impl HostState { /// Constructs a new `HostState`. - pub fn new(allocator: FreeingBumpHeapAllocator, instance: Rc<InstanceWrapper>) -> Self { + pub fn new(allocator: FreeingBumpHeapAllocator) -> Self { HostState { - sandbox_store: Rc::new(RefCell::new(sandbox::Store::new( + sandbox_store: SandboxStore(Some(Box::new(sandbox::Store::new( sandbox::SandboxBackend::TryWasmer, - ))), - allocator: RefCell::new(allocator), - instance, + )))), + allocator, } } - - /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. - pub(crate) fn materialize<'a, 'b, 'c>( - &'a self, - caller: &'b mut Caller<'c, StoreData>, - ) -> HostContext<'a, 'b, 'c> { - HostContext { host_state: self, caller } - } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime /// runtime. The `HostContext` exists only for the lifetime of the call and borrows state from /// a longer-living `HostState`.
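To make the new data flow concrete: the per-call state now lives inside the wasmtime `Store`'s data and is reached from host functions through the `Caller`, instead of being shared behind `Rc`s. Below is a minimal standalone sketch of that pattern, not part of this patch; it assumes a recent wasmtime (the `Store`-data API) and uses a plain `u64` as a hypothetical stand-in for `HostState`:

    use wasmtime::{Caller, Engine, Func, Store};

    // Stand-in for `StoreData`: the state a host function needs during a call.
    struct MyStoreData {
        host_state: Option<u64>,
    }

    fn main() {
        let engine = Engine::default();
        let mut store = Store::new(&engine, MyStoreData { host_state: Some(0) });

        // A host function receives `Caller<'_, MyStoreData>` and can reach the
        // state directly, without any interior mutability.
        let bump = Func::wrap(&mut store, |mut caller: Caller<'_, MyStoreData>| {
            if let Some(state) = caller.data_mut().host_state.as_mut() {
                *state += 1;
            }
        });

        bump.call(&mut store, &[], &mut []).expect("no wasm trap is possible here");
        assert_eq!(store.data().host_state, Some(1));
    }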
-pub(crate) struct HostContext<'a, 'b, 'c> { - host_state: &'a HostState, - caller: &'b mut Caller<'c, StoreData>, +pub(crate) struct HostContext<'a, 'b> { + pub(crate) caller: &'a mut Caller<'b, StoreData>, } -impl<'a, 'b, 'c> std::ops::Deref for HostContext<'a, 'b, 'c> { - type Target = HostState; - fn deref(&self) -> &HostState { - self.host_state +impl<'a, 'b> HostContext<'a, 'b> { + fn host_state(&self) -> &HostState { + self.caller + .data() + .host_state() + .expect("host state is not empty when calling a function in wasm; qed") + } + + fn host_state_mut(&mut self) -> &mut HostState { + self.caller + .data_mut() + .host_state_mut() + .expect("host state is not empty when calling a function in wasm; qed") + } + + fn sandbox_store(&self) -> &sandbox::Store<Func> { + self.host_state() + .sandbox_store + .0 + .as_ref() + .expect("sandbox store is only empty when temporarily borrowed") + } + + fn sandbox_store_mut(&mut self) -> &mut sandbox::Store<Func> { + self.host_state_mut() + .sandbox_store + .0 + .as_mut() + .expect("sandbox store is only empty when temporarily borrowed") } } -impl<'a, 'b, 'c> sp_wasm_interface::FunctionContext for HostContext<'a, 'b, 'c> { +impl<'a, 'b> sp_wasm_interface::FunctionContext for HostContext<'a, 'b> { fn read_memory_into( &self, address: Pointer<u8>, dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { - let ctx = &self.caller; - self.host_state - .instance - .read_memory_into(ctx, address, dest) - .map_err(|e| e.to_string()) + util::read_memory_into(&self.caller, address, dest).map_err(|e| e.to_string()) } fn write_memory(&mut self, address: Pointer<u8>, data: &[u8]) -> sp_wasm_interface::Result<()> { - let ctx = &mut self.caller; - self.host_state - .instance - .write_memory_from(ctx, address, data) - .map_err(|e| e.to_string()) + util::write_memory_from(&mut self.caller, address, data).map_err(|e| e.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result<Pointer<u8>> { - let ctx = &mut self.caller; - let allocator = &self.host_state.allocator; - - self.host_state - .instance - .allocate(ctx, &mut *allocator.borrow_mut(), size) + let memory = self.caller.data().memory(); + let (memory, data) = memory.data_and_store_mut(&mut self.caller); + data.host_state_mut() + .expect("host state is not empty when calling a function in wasm; qed") + .allocator + .allocate(memory, size) .map_err(|e| e.to_string()) } fn deallocate_memory(&mut self, ptr: Pointer<u8>) -> sp_wasm_interface::Result<()> { - let ctx = &mut self.caller; - let allocator = &self.host_state.allocator; - - self.host_state - .instance - .deallocate(ctx, &mut *allocator.borrow_mut(), ptr) + let memory = self.caller.data().memory(); + let (memory, data) = memory.data_and_store_mut(&mut self.caller); + data.host_state_mut() + .expect("host state is not empty when calling a function in wasm; qed") + .allocator + .deallocate(memory, ptr) + .map_err(|e| e.to_string()) } @@ -133,7 +136,7 @@ impl<'a, 'b, 'c> sp_wasm_interface::FunctionContext for HostContext<'a, 'b, 'c> } } -impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { +impl<'a, 'b> Sandbox for HostContext<'a, 'b> { fn memory_get( &mut self, memory_id: MemoryId, @@ -141,8 +144,7 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { buf_ptr: Pointer<u8>, buf_len: WordSize, ) -> sp_wasm_interface::Result<u32> { - let sandboxed_memory = - self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; + let sandboxed_memory = self.sandbox_store().memory(memory_id).map_err(|e| e.to_string())?; let len = buf_len as usize; @@ -151,8 +153,7 @@
impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { Ok(buffer) => buffer, }; - let instance = self.instance.clone(); - if let Err(_) = instance.write_memory_from(&mut self.caller, buf_ptr, &buffer) { + if util::write_memory_from(&mut self.caller, buf_ptr, &buffer).is_err() { return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } @@ -166,17 +167,16 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { val_ptr: Pointer, val_len: WordSize, ) -> sp_wasm_interface::Result { - let sandboxed_memory = - self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; + let sandboxed_memory = self.sandbox_store().memory(memory_id).map_err(|e| e.to_string())?; let len = val_len as usize; - let buffer = match self.instance.read_memory(&self.caller, val_ptr, len) { + let buffer = match util::read_memory(&self.caller, val_ptr, len) { Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), Ok(buffer) => buffer, }; - if let Err(_) = sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer) { + if sandboxed_memory.write_from(Pointer::new(offset as u32), &buffer).is_err() { return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } @@ -184,17 +184,11 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { } fn memory_teardown(&mut self, memory_id: MemoryId) -> sp_wasm_interface::Result<()> { - self.sandbox_store - .borrow_mut() - .memory_teardown(memory_id) - .map_err(|e| e.to_string()) + self.sandbox_store_mut().memory_teardown(memory_id).map_err(|e| e.to_string()) } fn memory_new(&mut self, initial: u32, maximum: u32) -> sp_wasm_interface::Result { - self.sandbox_store - .borrow_mut() - .new_memory(initial, maximum) - .map_err(|e| e.to_string()) + self.sandbox_store_mut().new_memory(initial, maximum).map_err(|e| e.to_string()) } fn invoke( @@ -215,14 +209,10 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { .map(Into::into) .collect::>(); - let instance = - self.sandbox_store.borrow().instance(instance_id).map_err(|e| e.to_string())?; + let instance = self.sandbox_store().instance(instance_id).map_err(|e| e.to_string())?; - let dispatch_thunk = self - .sandbox_store - .borrow() - .dispatch_thunk(instance_id) - .map_err(|e| e.to_string())?; + let dispatch_thunk = + self.sandbox_store().dispatch_thunk(instance_id).map_err(|e| e.to_string())?; let result = instance.invoke( export_name, @@ -249,8 +239,7 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { } fn instance_teardown(&mut self, instance_id: u32) -> sp_wasm_interface::Result<()> { - self.sandbox_store - .borrow_mut() + self.sandbox_store_mut() .instance_teardown(instance_id) .map_err(|e| e.to_string()) } @@ -264,14 +253,12 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { ) -> sp_wasm_interface::Result { // Extract a dispatch thunk from the instance's table by the specified index. let dispatch_thunk = { - let ctx = &mut self.caller; - let table_item = self - .host_state - .instance + let table = self + .caller + .data() .table() - .as_ref() - .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? - .get(ctx, dispatch_thunk_id); + .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")?; + let table_item = table.get(&mut self.caller, dispatch_thunk_id); table_item .ok_or_else(|| "dispatch_thunk_id is out of bounds")? 
@@ -281,25 +268,39 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { .clone() }; - let guest_env = - match sandbox::GuestEnvironment::decode(&*self.sandbox_store.borrow(), raw_env_def) { - Ok(guest_env) => guest_env, - Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), - }; + let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store(), raw_env_def) + { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; - let store = self.sandbox_store.clone(); - let store = &mut store.borrow_mut(); - let result = store - .instantiate( + let mut store = self + .host_state_mut() + .sandbox_store + .0 + .take() + .expect("sandbox store is only empty when borrowed"); + + // Catch any potential panics so that we can properly restore the sandbox store + // which we've destructively borrowed. + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + store.instantiate( wasm, guest_env, state, &mut SandboxContext { host_context: self, dispatch_thunk: dispatch_thunk.clone() }, ) - .map(|i| i.register(store, dispatch_thunk)); + })); + + self.host_state_mut().sandbox_store.0 = Some(store); + + let result = match result { + Ok(result) => result, + Err(error) => std::panic::resume_unwind(error), + }; let instance_idx_or_err_code = match result { - Ok(instance_idx) => instance_idx, + Ok(instance) => instance.register(&mut self.sandbox_store_mut(), dispatch_thunk), Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, Err(_) => sandbox_primitives::ERR_MODULE, }; @@ -312,20 +313,19 @@ impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { instance_idx: u32, name: &str, ) -> sp_wasm_interface::Result> { - self.sandbox_store - .borrow() + self.sandbox_store() .instance(instance_idx) .map(|i| i.get_global_val(name)) .map_err(|e| e.to_string()) } } -struct SandboxContext<'a, 'b, 'c, 'd> { - host_context: &'a mut HostContext<'b, 'c, 'd>, +struct SandboxContext<'a, 'b, 'c> { + host_context: &'a mut HostContext<'b, 'c>, dispatch_thunk: Func, } -impl<'a, 'b, 'c, 'd> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c, 'd> { +impl<'a, 'b, 'c> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c> { fn invoke( &mut self, invoke_args_ptr: Pointer, diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index a00ab14263e7f..57ce48f537e94 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -17,6 +17,7 @@ // along with this program. If not, see . use crate::{ + host::HostContext, runtime::{Store, StoreData}, util, }; @@ -191,19 +192,7 @@ fn call_static<'a>( mut caller: Caller<'a, StoreData>, ) -> Result<(), wasmtime::Trap> { let unwind_result = { - let host_state = caller - .data() - .host_state() - .expect( - "host functions can be called only from wasm instance; - wasm instance is always called initializing context; - therefore host_ctx cannot be None; - qed - ", - ) - .clone(); - - let mut host_ctx = host_state.materialize(&mut caller); + let mut host_ctx = HostContext { caller: &mut caller }; // `from_wasmtime_val` panics if it encounters a value that doesn't fit into the values // available in substrate. diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 1d40563d0a9ff..e9b18d8c2b89d 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -19,15 +19,12 @@ //! 
Defines data and logic needed for interaction with a WebAssembly instance of a substrate //! runtime module. -use crate::imports::Imports; - +use crate::runtime::{Store, StoreData}; use sc_executor_common::{ error::{Error, Result}, - util::checked_range, wasm_runtime::InvokeMethod, }; -use sp_wasm_interface::{Pointer, Value, WordSize}; -use std::marker; +use sp_wasm_interface::{Function, Pointer, Value, WordSize}; use wasmtime::{ AsContext, AsContextMut, Extern, Func, Global, Instance, Memory, Module, Table, Val, }; @@ -107,18 +104,8 @@ impl EntryPoint { /// routines. pub struct InstanceWrapper { instance: Instance, - - // The memory instance of the `instance`. - // - // It is important to make sure that we don't make any copies of this to make it easier to - // proof. See `memory_as_slice` and `memory_as_slice_mut`. memory: Memory, - - /// Indirect functions table of the module - table: Option<Table>, - - // Make this struct explicitly !Send & !Sync. - _not_send_nor_sync: marker::PhantomData<*const ()>, + store: Store, } fn extern_memory(extern_: &Extern) -> Option<&Memory> { @@ -153,11 +140,36 @@ impl InstanceWrapper { /// Create a new instance wrapper from the given wasm module. pub fn new( module: &Module, - imports: &Imports, + host_functions: &[&'static dyn Function], heap_pages: u64, - mut ctx: impl AsContextMut, + allow_missing_func_imports: bool, + max_memory_size: Option<usize>, ) -> Result<Self> { + let limits = if let Some(max_memory_size) = max_memory_size { + wasmtime::StoreLimitsBuilder::new().memory_size(max_memory_size).build() + } else { + Default::default() + }; + + let mut store = Store::new( + module.engine(), + StoreData { limits, host_state: None, memory: None, table: None }, + ); + if max_memory_size.is_some() { + store.limiter(|s| &mut s.limits); + } + + // Scan all imports, find the matching host functions, and create stubs that adapt arguments + // and results. + let imports = crate::imports::resolve_imports( + &mut store, + module, + host_functions, + heap_pages, + allow_missing_func_imports, + )?; + let instance = Instance::new(&mut store, module, &imports.externs) .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { @@ -165,55 +177,56 @@ impl InstanceWrapper { .expect("only memory can be at the `memory_idx`; qed") .clone(), None => { - let memory = get_linear_memory(&instance, &mut ctx)?; - if !memory.grow(&mut ctx, heap_pages).is_ok() { + let memory = get_linear_memory(&instance, &mut store)?; + if !memory.grow(&mut store, heap_pages).is_ok() { return Err("failed to increase the linear memory size".into()) } memory }, }; - let table = get_table(&instance, ctx); + let table = get_table(&instance, &mut store); + + store.data_mut().memory = Some(memory); + store.data_mut().table = table; - Ok(Self { table, instance, memory, _not_send_nor_sync: marker::PhantomData }) + Ok(Self { instance, memory, store }) } /// Resolves a substrate entrypoint by the given name. /// /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return /// an error. - pub fn resolve_entrypoint( - &self, - method: InvokeMethod, - mut ctx: impl AsContextMut, - ) -> Result<EntryPoint> { + pub fn resolve_entrypoint(&mut self, method: InvokeMethod) -> Result<EntryPoint> { Ok(match method { InvokeMethod::Export(method) => { // Resolve the requested method and verify that it has a proper signature.
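The `StoreLimits` setup above is what enforces `max_memory_size`. As a rough standalone illustration of that mechanism (hypothetical values, recent wasmtime API, not part of the patch):

    use wasmtime::{Engine, Store, StoreLimits, StoreLimitsBuilder};

    struct Data {
        limits: StoreLimits,
    }

    fn main() {
        // Cap the store's linear memory at 16 wasm pages (16 * 64 KiB).
        let limits = StoreLimitsBuilder::new().memory_size(16 * 65536).build();
        let engine = Engine::default();
        let mut store = Store::new(&engine, Data { limits });
        // Registering the limiter makes wasmtime consult it on every `memory.grow`.
        store.limiter(|data| &mut data.limits);
    }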
- let export = self.instance.get_export(&mut ctx, method).ok_or_else(|| { - Error::from(format!("Exported method {} is not found", method)) - })?; + let export = + self.instance.get_export(&mut self.store, method).ok_or_else(|| { + Error::from(format!("Exported method {} is not found", method)) + })?; let func = extern_func(&export) .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? .clone(); - EntryPoint::direct(func, ctx).map_err(|_| { + EntryPoint::direct(func, &self.store).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, InvokeMethod::Table(func_ref) => { let table = self .instance - .get_table(&mut ctx, "__indirect_function_table") + .get_table(&mut self.store, "__indirect_function_table") .ok_or(Error::NoTable)?; - let val = - table.get(&mut ctx, func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; + let val = table + .get(&mut self.store, func_ref) + .ok_or(Error::NoTableEntryWithIndex(func_ref))?; let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? .ok_or(Error::FunctionRefIsNull(func_ref))? .clone(); - EntryPoint::direct(func, ctx).map_err(|_| { + EntryPoint::direct(func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -223,10 +236,10 @@ impl InstanceWrapper { InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { let table = self .instance - .get_table(&mut ctx, "__indirect_function_table") + .get_table(&mut self.store, "__indirect_function_table") .ok_or(Error::NoTable)?; let val = table - .get(&mut ctx, dispatcher_ref) + .get(&mut self.store, dispatcher_ref) .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; let dispatcher = val .funcref() @@ -234,7 +247,7 @@ impl InstanceWrapper { .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? .clone(); - EntryPoint::wrapped(dispatcher, func, ctx).map_err(|_| { + EntryPoint::wrapped(dispatcher, func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -244,25 +257,20 @@ impl InstanceWrapper { }) } - /// Returns an indirect function table of this instance. - pub fn table(&self) -> Option<&Table> { - self.table.as_ref() - } - /// Reads `__heap_base: i32` global variable and returns it. /// /// If it doesn't exist, not a global or of not i32 type returns an error. - pub fn extract_heap_base(&self, mut ctx: impl AsContextMut) -> Result { + pub fn extract_heap_base(&mut self) -> Result { let heap_base_export = self .instance - .get_export(&mut ctx, "__heap_base") + .get_export(&mut self.store, "__heap_base") .ok_or_else(|| Error::from("__heap_base is not found"))?; let heap_base_global = extern_global(&heap_base_export) .ok_or_else(|| Error::from("__heap_base is not a global"))?; let heap_base = heap_base_global - .get(&mut ctx) + .get(&mut self.store) .i32() .ok_or_else(|| Error::from("__heap_base is not a i32"))?; @@ -270,15 +278,15 @@ impl InstanceWrapper { } /// Get the value from a global with the given `name`. 
- pub fn get_global_val(&self, mut ctx: impl AsContextMut, name: &str) -> Result<Option<Value>> { - let global = match self.instance.get_export(&mut ctx, name) { + pub fn get_global_val(&mut self, name: &str) -> Result<Option<Value>> { + let global = match self.instance.get_export(&mut self.store, name) { Some(global) => global, None => return Ok(None), }; let global = extern_global(&global).ok_or_else(|| format!("`{}` is not a global", name))?; - match global.get(ctx) { + match global.get(&mut self.store) { Val::I32(val) => Ok(Some(Value::I32(val))), Val::I64(val) => Ok(Some(Value::I64(val))), Val::F32(val) => Ok(Some(Value::F32(val))), @@ -288,8 +296,8 @@ impl InstanceWrapper { } /// Get a global with the given `name`. - pub fn get_global(&self, ctx: impl AsContextMut, name: &str) -> Option<Global> { - self.instance.get_global(ctx, name) + pub fn get_global(&mut self, name: &str) -> Option<Global> { + self.instance.get_global(&mut self.store, name) } } @@ -307,7 +315,7 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result<Memory> -fn get_table(instance: &Instance, ctx: impl AsContextMut) -> Option<Table> { +fn get_table(instance: &Instance, ctx: &mut Store) -> Option<Table> { instance .get_export(ctx, "__indirect_function_table") .as_ref() @@ -317,97 +325,16 @@ fn get_table(instance: &Instance, ctx: impl AsContextMut) -> Option<Table>
{ /// Functions related to memory. impl InstanceWrapper { - /// Read data from a slice of memory into a newly allocated buffer. - /// - /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory( - &self, - ctx: impl AsContext, - source_addr: Pointer, - size: usize, - ) -> Result> { - let range = checked_range(source_addr.into(), size, self.memory.data_size(&ctx)) - .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; - - let mut buffer = vec![0; range.len()]; - self.read_memory_into(ctx, source_addr, &mut buffer)?; - - Ok(buffer) - } - - /// Read data from the instance memory into a slice. - /// - /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory_into( - &self, - ctx: impl AsContext, - address: Pointer, - dest: &mut [u8], - ) -> Result<()> { - let memory = self.memory.data(ctx.as_context()); - - let range = checked_range(address.into(), dest.len(), memory.len()) - .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; - dest.copy_from_slice(&memory[range]); - Ok(()) - } - - /// Write data to the instance memory from a slice. - /// - /// Returns an error if the write would go out of the memory bounds. - pub fn write_memory_from( - &self, - mut ctx: impl AsContextMut, - address: Pointer, - data: &[u8], - ) -> Result<()> { - let memory = self.memory.data_mut(ctx.as_context_mut()); - - let range = checked_range(address.into(), data.len(), memory.len()) - .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - memory[range].copy_from_slice(data); - Ok(()) - } - - /// Allocate some memory of the given size. Returns pointer to the allocated memory region. - /// - /// Returns `Err` in case memory cannot be allocated. Refer to the allocator documentation - /// to get more details. - pub fn allocate( - &self, - mut ctx: impl AsContextMut, - allocator: &mut sc_allocator::FreeingBumpHeapAllocator, - size: WordSize, - ) -> Result> { - let memory = self.memory.data_mut(ctx.as_context_mut()); - - allocator.allocate(memory, size).map_err(Into::into) - } - - /// Deallocate the memory pointed by the given pointer. - /// - /// Returns `Err` in case the given memory region cannot be deallocated. - pub fn deallocate( - &self, - mut ctx: impl AsContextMut, - allocator: &mut sc_allocator::FreeingBumpHeapAllocator, - ptr: Pointer, - ) -> Result<()> { - let memory = self.memory.data_mut(ctx.as_context_mut()); - - allocator.deallocate(memory, ptr).map_err(Into::into) - } - /// Returns the pointer to the first byte of the linear memory for this instance. - pub fn base_ptr(&self, ctx: impl AsContext) -> *const u8 { - self.memory.data_ptr(ctx) + pub fn base_ptr(&self) -> *const u8 { + self.memory.data_ptr(&self.store) } /// If possible removes physical backing from the allocated linear memory which /// leads to returning the memory back to the system; this also zeroes the memory /// as a side-effect. - pub fn decommit(&self, mut ctx: impl AsContextMut) { - if self.memory.data_size(&ctx) == 0 { + pub fn decommit(&mut self) { + if self.memory.data_size(&self.store) == 0 { return } @@ -416,8 +343,8 @@ impl InstanceWrapper { use std::sync::Once; unsafe { - let ptr = self.memory.data_ptr(&ctx); - let len = self.memory.data_size(&ctx); + let ptr = self.memory.data_ptr(&self.store); + let len = self.memory.data_size(&self.store); // Linux handles MADV_DONTNEED reliably. The result is that the given area // is unmapped and will be zeroed on the next pagefault. 
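For context, the decommit trick above relies on `madvise(MADV_DONTNEED)`: on Linux the kernel drops the physical pages behind the mapping and faults in zeroes on the next access. A hypothetical standalone equivalent, assuming the `libc` crate as a dependency (not part of the patch):

    #[cfg(target_os = "linux")]
    fn decommit(ptr: *mut u8, len: usize) -> bool {
        // Returns true when the kernel accepted the hint; the pages are then
        // unmapped and will be zero-filled on the next page fault.
        unsafe { libc::madvise(ptr as *mut libc::c_void, len, libc::MADV_DONTNEED) == 0 }
    }

    #[cfg(not(target_os = "linux"))]
    fn decommit(_ptr: *mut u8, _len: usize) -> bool {
        // No reliable equivalent assumed here; the caller falls back to
        // zeroing the memory manually, exactly as the patch does.
        false
    }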
@@ -438,6 +365,14 @@ impl InstanceWrapper { // If we're on an unsupported OS or the memory couldn't have been // decommitted for some reason then just manually zero it out. - self.memory.data_mut(ctx.as_context_mut()).fill(0); + self.memory.data_mut(self.store.as_context_mut()).fill(0); + } + + pub(crate) fn store(&self) -> &Store { + &self.store + } + + pub(crate) fn store_mut(&mut self) -> &mut Store { + &mut self.store + } } diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 7808ac7ce547d..606401132e9e9 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -20,7 +20,6 @@ use crate::{ host::HostState, - imports::{resolve_imports, Imports}, instance_wrapper::{EntryPoint, InstanceWrapper}, util, }; @@ -37,75 +36,98 @@ use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{Function, Pointer, Value, WordSize}; use std::{ path::{Path, PathBuf}, - rc::Rc, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, }; -use wasmtime::{AsContext, AsContextMut, Engine, StoreLimits}; +use wasmtime::{Engine, Memory, StoreLimits, Table}; pub(crate) struct StoreData { - /// The limits we aply to the store. We need to store it here to return a reference to this + /// The limits we apply to the store. We need to store it here to return a reference to this /// object when we have the limits enabled. - limits: StoreLimits, + pub(crate) limits: StoreLimits, /// This will only be set when we call into the runtime. - host_state: Option<Rc<HostState>>, + pub(crate) host_state: Option<HostState>, + /// This will always be set once the store is initialized. + pub(crate) memory: Option<Memory>, + /// This will be set only if the runtime actually contains a table. + pub(crate) table: Option<Table>, } impl StoreData { /// Returns a reference to the host state. - pub fn host_state(&self) -> Option<&Rc<HostState>> { + pub fn host_state(&self) -> Option<&HostState> { self.host_state.as_ref() } + + /// Returns a mutable reference to the host state. + pub fn host_state_mut(&mut self) -> Option<&mut HostState> { + self.host_state.as_mut() + } + + /// Returns the host memory. + pub fn memory(&self) -> Memory { + self.memory.expect("memory is always set; qed") + } + + /// Returns the host table. + pub fn table(&self) -> Option<Table> { + self.table + } } pub(crate) type Store = wasmtime::Store<StoreData>; enum Strategy { FastInstanceReuse { - instance_wrapper: Rc<InstanceWrapper>, + instance_wrapper: InstanceWrapper, globals_snapshot: GlobalsSnapshot<wasmtime::Global>, data_segments_snapshot: Arc<DataSegmentsSnapshot>, heap_base: u32, - store: Store, }, RecreateInstance(InstanceCreator), } struct InstanceCreator { - store: Store, module: Arc<wasmtime::Module>, - imports: Arc<Imports>, + host_functions: Vec<&'static dyn Function>, heap_pages: u64, + allow_missing_func_imports: bool, + max_memory_size: Option<usize>, } impl InstanceCreator { fn instantiate(&mut self) -> Result<InstanceWrapper> { - InstanceWrapper::new(&*self.module, &*self.imports, self.heap_pages, &mut self.store) + InstanceWrapper::new( + &*self.module, + &self.host_functions, + self.heap_pages, + self.allow_missing_func_imports, + self.max_memory_size, + ) } } -struct InstanceGlobals<'a, C> { - ctx: &'a mut C, - instance: &'a InstanceWrapper, +struct InstanceGlobals<'a> { + instance: &'a mut InstanceWrapper, } -impl<'a, C: AsContextMut> runtime_blob::InstanceGlobals for InstanceGlobals<'a, C> { +impl<'a> runtime_blob::InstanceGlobals for InstanceGlobals<'a> { type Global = wasmtime::Global; fn get_global(&mut self, export_name: &str) -> Self::Global { self.instance - .get_global(&mut self.ctx, export_name) + .get_global(export_name) .expect("get_global is guaranteed to be called with an export name of a global; qed") } fn get_global_value(&mut self, global: &Self::Global) -> Value { - util::from_wasmtime_val(global.get(&mut self.ctx)) + util::from_wasmtime_val(global.get(&mut self.instance.store_mut())) } fn set_global_value(&mut self, global: &Self::Global, value: Value) { - global.set(&mut self.ctx, util::into_wasmtime_val(value)).expect( + global.set(&mut self.instance.store_mut(), util::into_wasmtime_val(value)).expect( "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", ); } @@ -124,50 +146,19 @@ pub struct WasmtimeRuntime { snapshot_data: Option<InstanceSnapshotData>, config: Config, host_functions: Vec<&'static dyn Function>, - engine: Engine, -} - -impl WasmtimeRuntime { - /// Creates the store respecting the set limits. - fn new_store(&self) -> Store { - let limits = if let Some(max_memory_size) = self.config.max_memory_size { - wasmtime::StoreLimitsBuilder::new().memory_size(max_memory_size).build() - } else { - Default::default() - }; - - let mut store = Store::new(&self.engine, StoreData { limits, host_state: None }); - - if self.config.max_memory_size.is_some() { - store.limiter(|s| &mut s.limits); - } - - store - } -} impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result<Box<dyn WasmInstance>> { - let mut store = self.new_store(); - - // Scan all imports, find the matching host functions, and create stubs that adapt arguments - // and results. - // - // NOTE: Attentive reader may notice that this could've been moved in `WasmModule` creation. - // However, I am not sure if that's a good idea since it would be pushing our luck - // further by assuming that `Store` is not only `Send` but also `Sync`.
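The `InstanceGlobals` helper above boils down to reading and writing mutable wasmtime globals through the store. A minimal sketch of that primitive in isolation (recent wasmtime API, not part of the patch):

    use wasmtime::{Engine, Global, GlobalType, Mutability, Store, Val, ValType};

    fn main() {
        let engine = Engine::default();
        let mut store = Store::new(&engine, ());
        let ty = GlobalType::new(ValType::I32, Mutability::Var);
        let global = Global::new(&mut store, ty, Val::I32(7)).expect("type and value match");

        // Snapshot the value, mutate it, then restore it, which is in spirit
        // what `GlobalsSnapshot::take` and `apply` do across instantiations.
        let snapshot = global.get(&mut store);
        global.set(&mut store, Val::I32(42)).expect("global is mutable");
        global.set(&mut store, snapshot).expect("global is mutable");
        assert_eq!(global.get(&mut store).i32(), Some(7));
    }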
- let imports = resolve_imports( - &mut store, - &self.module, - &self.host_functions, - self.config.heap_pages, - self.config.allow_missing_func_imports, - )?; - let strategy = if let Some(ref snapshot_data) = self.snapshot_data { - let instance_wrapper = - InstanceWrapper::new(&self.module, &imports, self.config.heap_pages, &mut store)?; - let heap_base = instance_wrapper.extract_heap_base(&mut store)?; + let mut instance_wrapper = InstanceWrapper::new( + &self.module, + &self.host_functions, + self.config.heap_pages, + self.config.allow_missing_func_imports, + self.config.max_memory_size, + )?; + let heap_base = instance_wrapper.extract_heap_base()?; // This function panics if the instance was created from a runtime blob different from // which the mutable globals were collected. Here, it is easy to see that there is only @@ -175,22 +166,22 @@ impl WasmModule for WasmtimeRuntime { // instance and collecting the mutable globals. let globals_snapshot = GlobalsSnapshot::take( &snapshot_data.mutable_globals, - &mut InstanceGlobals { ctx: &mut store, instance: &instance_wrapper }, + &mut InstanceGlobals { instance: &mut instance_wrapper }, ); Strategy::FastInstanceReuse { - instance_wrapper: Rc::new(instance_wrapper), + instance_wrapper, globals_snapshot, data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), heap_base, - store, } } else { Strategy::RecreateInstance(InstanceCreator { - imports: Arc::new(imports), module: self.module.clone(), - store, + host_functions: self.host_functions.clone(), heap_pages: self.config.heap_pages, + allow_missing_func_imports: self.config.allow_missing_func_imports, + max_memory_size: self.config.max_memory_size, }) }; @@ -204,68 +195,52 @@ pub struct WasmtimeInstance { strategy: Strategy, } -// This is safe because `WasmtimeInstance` does not leak reference to `self.imports` -// and all imports don't reference anything, other than host functions and memory -unsafe impl Send for WasmtimeInstance {} - impl WasmInstance for WasmtimeInstance { fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result> { match &mut self.strategy { Strategy::FastInstanceReuse { - instance_wrapper, + ref mut instance_wrapper, globals_snapshot, data_segments_snapshot, heap_base, - ref mut store, } => { - let entrypoint = instance_wrapper.resolve_entrypoint(method, &mut *store)?; + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; data_segments_snapshot.apply(|offset, contents| { - instance_wrapper.write_memory_from(&mut *store, Pointer::new(offset), contents) + util::write_memory_from( + instance_wrapper.store_mut(), + Pointer::new(offset), + contents, + ) })?; - globals_snapshot - .apply(&mut InstanceGlobals { ctx: &mut *store, instance: &*instance_wrapper }); + globals_snapshot.apply(&mut InstanceGlobals { instance: instance_wrapper }); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = perform_call( - &mut *store, - data, - instance_wrapper.clone(), - entrypoint, - allocator, - ); + let result = perform_call(data, instance_wrapper, entrypoint, allocator); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. 
- instance_wrapper.decommit(store); + instance_wrapper.decommit(); result }, Strategy::RecreateInstance(ref mut instance_creator) => { - let instance_wrapper = instance_creator.instantiate()?; - let heap_base = instance_wrapper.extract_heap_base(&mut instance_creator.store)?; - let entrypoint = - instance_wrapper.resolve_entrypoint(method, &mut instance_creator.store)?; + let mut instance_wrapper = instance_creator.instantiate()?; + let heap_base = instance_wrapper.extract_heap_base()?; + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call( - &mut instance_creator.store, - data, - Rc::new(instance_wrapper), - entrypoint, - allocator, - ) + perform_call(data, &mut instance_wrapper, entrypoint, allocator) }, } } fn get_global_const(&mut self, name: &str) -> Result> { match &mut self.strategy { - Strategy::FastInstanceReuse { instance_wrapper, ref mut store, .. } => - instance_wrapper.get_global_val(&mut *store, name), - Strategy::RecreateInstance(ref mut instance_creator) => instance_creator - .instantiate()? - .get_global_val(&mut instance_creator.store, name), + Strategy::FastInstanceReuse { instance_wrapper, .. } => + instance_wrapper.get_global_val(name), + Strategy::RecreateInstance(ref mut instance_creator) => + instance_creator.instantiate()?.get_global_val(name), } } @@ -276,8 +251,8 @@ impl WasmInstance for WasmtimeInstance { // associated with it. None }, - Strategy::FastInstanceReuse { instance_wrapper, store, .. } => - Some(instance_wrapper.base_ptr(&store)), + Strategy::FastInstanceReuse { instance_wrapper, .. } => + Some(instance_wrapper.base_ptr()), } } } @@ -591,7 +566,7 @@ unsafe fn do_create_runtime( }, }; - Ok(WasmtimeRuntime { module: Arc::new(module), snapshot_data, config, host_functions, engine }) + Ok(WasmtimeRuntime { module: Arc::new(module), snapshot_data, config, host_functions }) } fn instrument( @@ -627,50 +602,51 @@ pub fn prepare_runtime_artifact( } fn perform_call( - mut ctx: impl AsContextMut, data: &[u8], - instance_wrapper: Rc, + instance_wrapper: &mut InstanceWrapper, entrypoint: EntryPoint, mut allocator: FreeingBumpHeapAllocator, ) -> Result> { - let (data_ptr, data_len) = - inject_input_data(&mut ctx, &instance_wrapper, &mut allocator, data)?; + let (data_ptr, data_len) = inject_input_data(instance_wrapper, &mut allocator, data)?; - let host_state = HostState::new(allocator, instance_wrapper.clone()); + let host_state = HostState::new(allocator); // Set the host state before calling into wasm. 
- ctx.as_context_mut().data_mut().host_state = Some(Rc::new(host_state)); + instance_wrapper.store_mut().data_mut().host_state = Some(host_state); - let ret = entrypoint.call(&mut ctx, data_ptr, data_len).map(unpack_ptr_and_len); + let ret = entrypoint + .call(instance_wrapper.store_mut(), data_ptr, data_len) + .map(unpack_ptr_and_len); // Reset the host state - ctx.as_context_mut().data_mut().host_state = None; + instance_wrapper.store_mut().data_mut().host_state = None; let (output_ptr, output_len) = ret?; - let output = extract_output_data(ctx, &instance_wrapper, output_ptr, output_len)?; + let output = extract_output_data(instance_wrapper, output_ptr, output_len)?; Ok(output) } fn inject_input_data( - mut ctx: impl AsContextMut, - instance: &InstanceWrapper, + instance: &mut InstanceWrapper, allocator: &mut FreeingBumpHeapAllocator, data: &[u8], ) -> Result<(Pointer, WordSize)> { + let mut ctx = instance.store_mut(); + let memory = ctx.data().memory(); + let memory = memory.data_mut(&mut ctx); let data_len = data.len() as WordSize; - let data_ptr = instance.allocate(&mut ctx, allocator, data_len)?; - instance.write_memory_from(ctx, data_ptr, data)?; + let data_ptr = allocator.allocate(memory, data_len)?; + util::write_memory_from(instance.store_mut(), data_ptr, data)?; Ok((data_ptr, data_len)) } fn extract_output_data( - ctx: impl AsContext, instance: &InstanceWrapper, output_ptr: u32, output_len: u32, ) -> Result> { let mut output = vec![0; output_len as usize]; - instance.read_memory_into(ctx, Pointer::new(output_ptr), &mut output)?; + util::read_memory_into(instance.store(), Pointer::new(output_ptr), &mut output)?; Ok(output) } diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index 261afba0c6bc9..c34cbfad11138 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -310,3 +310,39 @@ fn test_max_memory_pages() { ) .unwrap(); } + +// This test takes quite a while to execute in a debug build (over 6 minutes on a TR 3970x) +// so it's ignored by default unless it was compiled with `--release`. +#[cfg_attr(build_type = "debug", ignore)] +#[test] +fn test_instances_without_reuse_are_not_leaked() { + use sp_wasm_interface::HostFunctions; + + let runtime = crate::create_runtime( + RuntimeBlob::uncompress_if_needed(&wasm_binary_unwrap()[..]).unwrap(), + crate::Config { + heap_pages: 2048, + max_memory_size: None, + allow_missing_func_imports: true, + cache_path: None, + semantics: crate::Semantics { + fast_instance_reuse: false, + deterministic_stack_limit: None, + canonicalize_nans: false, + parallel_compilation: true, + }, + }, + sp_io::SubstrateHostFunctions::host_functions(), + ) + .unwrap(); + + // As long as the `wasmtime`'s `Store` lives the instances spawned through it + // will live indefinitely. Currently it has a maximum limit of 10k instances, + // so let's spawn 10k + 1 of them to make sure our code doesn't keep the `Store` + // alive longer than it is necessary. (And since we disabled instance reuse + // a new instance will be spawned on each call.) 
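The property this comment describes can be seen in isolation: when every instantiation gets its own `Store`, dropping the store frees the instance, so looping past wasmtime's roughly 10k per-store instance cap is fine. A rough sketch of that behaviour (recent wasmtime API, trivial module, not part of the patch), before the test body continues below:

    use wasmtime::{Engine, Instance, Module, Store};

    fn main() {
        let engine = Engine::default();
        let module = Module::new(&engine, "(module)").expect("valid wat");
        for _ in 0..10_001 {
            // A fresh store per iteration: the instance dies with it at the end
            // of the scope instead of accumulating inside one long-lived store.
            let mut store = Store::new(&engine, ());
            let _instance = Instance::new(&mut store, &module, &[]).expect("instantiation succeeds");
        }
    }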
+ let mut instance = runtime.new_instance().unwrap(); + for _ in 0..10001 { + instance.call_export("test_empty_return", &[0]).unwrap(); + } +} diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 2c135fe7a343b..2c9379e9ce812 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -16,7 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_wasm_interface::Value; +use crate::runtime::StoreData; +use sc_executor_common::{ + error::{Error, Result}, + util::checked_range, +}; +use sp_wasm_interface::{Pointer, Value}; +use wasmtime::{AsContext, AsContextMut}; /// Converts a [`wasmtime::Val`] into a substrate runtime interface [`Value`]. /// @@ -41,3 +47,54 @@ pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { Value::F64(f_bits) => wasmtime::Val::F64(f_bits), } } + +/// Read data from a slice of memory into a newly allocated buffer. +/// +/// Returns an error if the read would go out of the memory bounds. +pub(crate) fn read_memory( + ctx: impl AsContext, + source_addr: Pointer, + size: usize, +) -> Result> { + let range = + checked_range(source_addr.into(), size, ctx.as_context().data().memory().data_size(&ctx)) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + + let mut buffer = vec![0; range.len()]; + read_memory_into(ctx, source_addr, &mut buffer)?; + + Ok(buffer) +} + +/// Read data from the instance memory into a slice. +/// +/// Returns an error if the read would go out of the memory bounds. +pub(crate) fn read_memory_into( + ctx: impl AsContext, + address: Pointer, + dest: &mut [u8], +) -> Result<()> { + let memory = ctx.as_context().data().memory().data(&ctx); + + let range = checked_range(address.into(), dest.len(), memory.len()) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + dest.copy_from_slice(&memory[range]); + Ok(()) +} + +/// Write data to the instance memory from a slice. +/// +/// Returns an error if the write would go out of the memory bounds. 
+pub(crate) fn write_memory_from( + mut ctx: impl AsContextMut, + address: Pointer, + data: &[u8], +) -> Result<()> { + let memory = ctx.as_context().data().memory(); + let memory = memory.data_mut(&mut ctx); + + let range = checked_range(address.into(), data.len(), memory.len()) + .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; + memory[range].copy_from_slice(data); + Ok(()) +} From fe7c02941122bbe4a7956aff53739b62f575240d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Nov 2021 09:39:43 +0100 Subject: [PATCH 132/162] frame-benchmarking: Fix `min-square` for `--steps=1` (#10323) --- frame/benchmarking/src/analysis.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 5ffb6e93c8fc3..6e0ffd23ee988 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -199,7 +199,7 @@ impl Analysis { } pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { - if r[0].components.is_empty() { + if r[0].components.len() <= 1 { return Self::median_value(r, selector) } From 09252188126a94a40eae47b694a6c9d8f6f35662 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 23 Nov 2021 10:19:05 +0100 Subject: [PATCH 133/162] Put back consensus_engine, only accept its absence (#10345) --- client/chain-spec/src/chain_spec.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 8d8f62a5182cf..e8247d7314991 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -168,7 +168,9 @@ struct ClientSpec { #[serde(flatten)] extensions: E, // Never used, left only for backward compatibility. - #[serde(default, skip_serializing)] + // In a future version, a `skip_serializing` attribute should be added in order to no longer + // generate chain specs with this field. + #[serde(default)] consensus_engine: (), #[serde(skip_serializing)] #[allow(unused)] From 1d74e58d2c448a056e915f1672bc87c428e09cb0 Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 23 Nov 2021 19:06:33 +0900 Subject: [PATCH 134/162] fix (#10342) --- frame/contracts/src/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 41db0796717e4..2b994d66af7e6 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -182,7 +182,7 @@ where let outcome = child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget)); let keys_removed = match outcome { - // This should not happen as our budget was large enough to remove all keys. + // This happens when our budget wasn't large enough to remove all keys. KillStorageResult::SomeRemaining(count) => count, KillStorageResult::AllRemoved(count) => { // We do not care to preserve order. 
The contract is deleted already and From c3de64869fc30da61ecb149b7903debddc46940e Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Tue, 23 Nov 2021 19:28:58 +0900 Subject: [PATCH 135/162] remove unused file (#10343) --- client/network/src/gossip.rs | 229 -------------------------- client/network/src/gossip/tests.rs | 250 ----------------------------- 2 files changed, 479 deletions(-) delete mode 100644 client/network/src/gossip.rs delete mode 100644 client/network/src/gossip/tests.rs diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs deleted file mode 100644 index 0bc46b2164bcb..0000000000000 --- a/client/network/src/gossip.rs +++ /dev/null @@ -1,229 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Helper for sending rate-limited gossip messages. -//! -//! # Context -//! -//! The [`NetworkService`] struct provides a way to send notifications to a certain peer through -//! the [`NetworkService::notification_sender`] method. This method is quite low level and isn't -//! expected to be used directly. -//! -//! The [`QueuedSender`] struct provided by this module is built on top of -//! [`NetworkService::notification_sender`] and provides a cleaner way to send notifications. -//! -//! # Behaviour -//! -//! An instance of [`QueuedSender`] is specific to a certain combination of `PeerId` and -//! protocol name. It maintains a buffer of messages waiting to be sent out. The user of this API -//! is able to manipulate that queue, adding or removing obsolete messages. -//! -//! Creating a [`QueuedSender`] also returns a opaque `Future` whose responsibility it to -//! drain that queue and actually send the messages. If the substream with the given combination -//! of peer and protocol is closed, the queue is silently discarded. It is the role of the user -//! to track which peers we are connected to. -//! -//! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same -//! order as they have been sent. -//! It is possible, in the situation of disconnects and reconnects, that messages arrive in a -//! different order. See also . -//! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or -//! if some other code uses the [`NetworkService`] to send notifications to this combination or -//! peer and protocol, then the notifications will be interleaved in an unpredictable way. -//! 
- -use crate::{ExHashT, NetworkService}; - -use async_std::sync::{Mutex, MutexGuard}; -use futures::prelude::*; -use futures::channel::mpsc::{channel, Receiver, Sender}; -use libp2p::PeerId; -use sp_runtime::traits::Block as BlockT; -use std::{ - borrow::Cow, - collections::VecDeque, - fmt, - sync::Arc, -}; - -#[cfg(test)] -mod tests; - -/// Notifications sender for a specific combination of network service, peer, and protocol. -pub struct QueuedSender { - /// Shared between the user-facing [`QueuedSender`] and the background future. - shared_message_queue: SharedMessageQueue, - /// Used to notify the background future to check for new messages in the message queue. - notify_background_future: Sender<()>, - /// Maximum number of elements in [`QueuedSender::shared_message_queue`]. - queue_size_limit: usize, -} - -impl QueuedSender { - /// Returns a new [`QueuedSender`] containing a queue of message for this specific - /// combination of peer and protocol. - /// - /// In addition to the [`QueuedSender`], also returns a `Future` whose role is to drive - /// the messages sending forward. - pub fn new( - service: Arc>, - peer_id: PeerId, - protocol: Cow<'static, str>, - queue_size_limit: usize, - messages_encode: F - ) -> (Self, impl Future + Send + 'static) - where - M: Send + 'static, - B: BlockT + 'static, - H: ExHashT, - F: Fn(M) -> Vec + Send + 'static, - { - let (notify_background_future, wait_for_sender) = channel(0); - - let shared_message_queue = Arc::new(Mutex::new( - VecDeque::with_capacity(queue_size_limit), - )); - - let background_future = create_background_future( - wait_for_sender, - service, - peer_id, - protocol, - shared_message_queue.clone(), - messages_encode - ); - - let sender = Self { - shared_message_queue, - notify_background_future, - queue_size_limit, - }; - - (sender, background_future) - } - - /// Locks the queue of messages towards this peer. - /// - /// The returned `Future` is expected to be ready quite quickly. - pub async fn lock_queue<'a>(&'a mut self) -> QueueGuard<'a, M> { - QueueGuard { - message_queue: self.shared_message_queue.lock().await, - queue_size_limit: self.queue_size_limit, - notify_background_future: &mut self.notify_background_future, - } - } - - /// Pushes a message to the queue, or discards it if the queue is full. - /// - /// The returned `Future` is expected to be ready quite quickly. - pub async fn queue_or_discard(&mut self, message: M) - where - M: Send + 'static - { - self.lock_queue().await.push_or_discard(message); - } -} - -impl fmt::Debug for QueuedSender { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("QueuedSender").finish() - } -} - -/// Locked queue of messages to the given peer. -/// -/// As long as this struct exists, the background future is asleep and the owner of the -/// [`QueueGuard`] is in total control of the message queue. Messages can only ever be sent out on -/// the network after the [`QueueGuard`] is dropped. -#[must_use] -pub struct QueueGuard<'a, M> { - message_queue: MutexGuard<'a, MessageQueue>, - /// Same as [`QueuedSender::queue_size_limit`]. - queue_size_limit: usize, - notify_background_future: &'a mut Sender<()>, -} - -impl<'a, M: Send + 'static> QueueGuard<'a, M> { - /// Pushes a message to the queue, or discards it if the queue is full. - /// - /// The message will only start being sent out after the [`QueueGuard`] is dropped. 
- pub fn push_or_discard(&mut self, message: M) { - if self.message_queue.len() < self.queue_size_limit { - self.message_queue.push_back(message); - } - } - - /// Calls `filter` for each message in the queue, and removes the ones for which `false` is - /// returned. - /// - /// > **Note**: The parameter of `filter` is a `&M` and not a `&mut M` (which would be - /// > better) because the underlying implementation relies on `VecDeque::retain`. - pub fn retain(&mut self, filter: impl FnMut(&M) -> bool) { - self.message_queue.retain(filter); - } -} - -impl<'a, M> Drop for QueueGuard<'a, M> { - fn drop(&mut self) { - // Notify background future to check for new messages in the message queue. - let _ = self.notify_background_future.try_send(()); - } -} - -type MessageQueue = VecDeque; - -/// [`MessageQueue`] shared between [`QueuedSender`] and background future. -type SharedMessageQueue = Arc>>; - -async fn create_background_future Vec>( - mut wait_for_sender: Receiver<()>, - service: Arc>, - peer_id: PeerId, - protocol: Cow<'static, str>, - shared_message_queue: SharedMessageQueue, - messages_encode: F, -) { - loop { - if wait_for_sender.next().await.is_none() { - return - } - - loop { - let mut queue_guard = shared_message_queue.lock().await; - let next_message = match queue_guard.pop_front() { - Some(msg) => msg, - None => break, - }; - drop(queue_guard); - - // Starting from below, we try to send the message. If an error happens when sending, - // the only sane option we have is to silently discard the message. - let sender = match service.notification_sender(peer_id.clone(), protocol.clone()) { - Ok(s) => s, - Err(_) => continue, - }; - - let ready = match sender.ready().await { - Ok(r) => r, - Err(_) => continue, - }; - - let _ = ready.send(messages_encode(next_message)); - } - } -} diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs deleted file mode 100644 index 88c4160bc5066..0000000000000 --- a/client/network/src/gossip/tests.rs +++ /dev/null @@ -1,250 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::block_request_handler::BlockRequestHandler; -use crate::state_request_handler::StateRequestHandler; -use crate::light_client_requests::handler::LightClientRequestHandler; -use crate::gossip::QueuedSender; -use crate::{config, Event, NetworkService, NetworkWorker}; - -use futures::prelude::*; -use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{borrow::Cow, sync::Arc, time::Duration}; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; - -type TestNetworkService = NetworkService< - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::Hash, ->; - -/// Builds a full node to be used for testing. 
Returns the node service and its associated events -/// stream. -/// -/// > **Note**: We return the events stream in order to not possibly lose events between the -/// > construction of the service and the moment the events stream is grabbed. -fn build_test_full_node(network_config: config::NetworkConfiguration) - -> (Arc, impl Stream) -{ - let client = Arc::new( - TestClientBuilder::with_default_backend() - .build_with_longest_chain() - .0, - ); - - #[derive(Clone)] - struct PassThroughVerifier(bool); - - #[async_trait::async_trait] - impl sc_consensus::Verifier for PassThroughVerifier { - async fn verify( - &mut self, - mut block: sp_consensus::BlockImportParams, - ) -> Result< - ( - sc_consensus::BlockImportParams, - Option)>>, - ), - String, - > { - let maybe_keys = block.header - .digest() - .log(|l| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) - }) - }) - .map(|blob| { - vec![( - sp_blockchain::well_known_cache_keys::AUTHORITIES, - blob.to_vec(), - )] - }); - - block.finalized = self.0; - block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((block, maybe_keys)) - } - } - - let import_queue = Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - )); - - let protocol_id = config::ProtocolId::from("/test-protocol-name"); - - let block_request_protocol_config = { - let (handler, protocol_config) = BlockRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let state_request_protocol_config = { - let (handler, protocol_config) = StateRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let light_client_request_protocol_config = { - let (handler, protocol_config) = LightClientRequestHandler::new( - &protocol_id, - client.clone(), - ); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let worker = NetworkWorker::new(config::Params { - role: config::Role::Full, - executor: None, - transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), - network_config, - chain: client.clone(), - on_demand: None, - transaction_pool: Arc::new(crate::config::EmptyTransactionPool), - protocol_id, - import_queue, - block_announce_validator: Box::new( - sp_consensus::block_validation::DefaultBlockAnnounceValidator, - ), - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync: None, - }) - .unwrap(); - - let service = worker.service().clone(); - let event_stream = service.event_stream("test"); - - async_std::task::spawn(async move { - futures::pin_mut!(worker); - let _ = worker.await; - }); - - (service, event_stream) -} - -const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); - -/// Builds two nodes and their associated events stream. -/// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. 
-fn build_nodes_one_proto() - -> (Arc, impl Stream, Arc, impl Stream) -{ - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: Default::default() - } - ], - listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() - }); - - let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![], - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], - .. Default::default() - }, - } - ], - transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() - }); - - (node1, events_stream1, node2, events_stream2) -} - -#[test] -fn basic_works() { - const NUM_NOTIFS: usize = 256; - - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); - let node2_id = node2.local_peer_id().clone(); - - let receiver = async_std::task::spawn(async move { - let mut received_notifications = 0; - - while received_notifications < NUM_NOTIFS { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamClosed { .. } => panic!(), - Event::NotificationsReceived { messages, .. } => { - for message in messages { - assert_eq!(message.0, PROTOCOL_NAME); - assert_eq!(message.1, &b"message"[..]); - received_notifications += 1; - } - } - _ => {} - }; - - if rand::random::() < 2 { - async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; - } - } - }); - - async_std::task::block_on(async move { - let (mut sender, bg_future) = - QueuedSender::new(node1, node2_id, PROTOCOL_NAME, NUM_NOTIFS, |msg| msg); - async_std::task::spawn(bg_future); - - // Wait for the `NotificationStreamOpened`. - loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {} - }; - } - - for _ in 0..NUM_NOTIFS { - sender.queue_or_discard(b"message".to_vec()).await; - } - - receiver.await; - }); -} From 7a531b08ae8ea975dc868ed2a6fa4f4856c0816a Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Tue, 23 Nov 2021 21:04:05 +0800 Subject: [PATCH 136/162] Support MMR Pruning (#9700) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use `0.3.2` * Replace `u64` with `NodeIndex` * Fix Typo * Add Pruning Logic * Fix Some Tests * Remove Comment * Log Only Under STD * Return while No Element to Append * Optimize Pruning Algorithm * Update Doc * Update Doc * Zero Copy Algorithm * Import Missing Type * Fix Merge Mistake * Import Missing Item * Make `verify` Off-Chain * `cargo fmt` * Avoid using NodeIndex in incorrect places. * Simplify pruning. 
* Format Co-authored-by: Tomasz Drwięga --- Cargo.lock | 4 +- bin/node/runtime/src/lib.rs | 2 +- frame/merkle-mountain-range/Cargo.toml | 2 +- .../primitives/src/lib.rs | 16 ++- frame/merkle-mountain-range/rpc/src/lib.rs | 6 +- .../merkle-mountain-range/src/benchmarking.rs | 2 +- frame/merkle-mountain-range/src/lib.rs | 14 +-- frame/merkle-mountain-range/src/mmr/mmr.rs | 14 +-- .../merkle-mountain-range/src/mmr/storage.rs | 99 +++++++++++++++---- frame/merkle-mountain-range/src/mmr/utils.rs | 14 +-- frame/merkle-mountain-range/src/tests.rs | 49 +++++++-- 11 files changed, 163 insertions(+), 59 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a45e0accd49b7..0091223b09e6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -936,9 +936,9 @@ dependencies = [ [[package]] name = "ckb-merkle-mountain-range" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e486fe53bb9f2ca0f58cb60e8679a5354fd6687a839942ef0a75967250289ca6" +checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" dependencies = [ "cfg-if 0.1.10", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5b3c0685d1a2a..299b7257f9d45 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1559,7 +1559,7 @@ impl_runtime_apis! { Block, mmr::Hash, > for Runtime { - fn generate_proof(leaf_index: u64) + fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { Mmr::generate_proof(leaf_index) diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index ca09725769ab2..2ff8b16fa4bde 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } -mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } +mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.2" } sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs index dac57bd42cd35..9aae26508f3c4 100644 --- a/frame/merkle-mountain-range/primitives/src/lib.rs +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -26,6 +26,16 @@ use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; +/// A type to describe node position in the MMR (node index). +pub type NodeIndex = u64; + +/// A type to describe leaf position in the MMR. +/// +/// Note this is different from [`NodeIndex`], which can be applied to +/// both leafs and inner nodes. Leafs will always have consecutive `LeafIndex`, +/// but might be actually at different positions in the MMR `NodeIndex`. +pub type LeafIndex = u64; + /// A provider of the MMR's leaf data. pub trait LeafDataProvider { /// A type that should end up in the leaf of MMR. @@ -275,9 +285,9 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); #[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] pub struct Proof { /// The index of the leaf the proof is for. 
- pub leaf_index: u64, + pub leaf_index: LeafIndex, /// Number of leaves in MMR, when the proof was generated. - pub leaf_count: u64, + pub leaf_count: NodeIndex, /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). pub items: Vec, } @@ -402,7 +412,7 @@ sp_api::decl_runtime_apis! { /// API to interact with MMR pallet. pub trait MmrApi { /// Generate MMR proof for a leaf under given index. - fn generate_proof(leaf_index: u64) -> Result<(EncodableOpaqueLeaf, Proof), Error>; + fn generate_proof(leaf_index: LeafIndex) -> Result<(EncodableOpaqueLeaf, Proof), Error>; /// Verify MMR proof against on-chain MMR. /// diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 4719893778f6a..004a70a8e962e 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -32,7 +32,7 @@ use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -pub use pallet_mmr_primitives::MmrApi as MmrRuntimeApi; +pub use pallet_mmr_primitives::{LeafIndex, MmrApi as MmrRuntimeApi}; /// Retrieved MMR leaf and its proof. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] @@ -71,7 +71,7 @@ pub trait MmrApi { #[rpc(name = "mmr_generateProof")] fn generate_proof( &self, - leaf_index: u64, + leaf_index: LeafIndex, at: Option, ) -> Result>; } @@ -98,7 +98,7 @@ where { fn generate_proof( &self, - leaf_index: u64, + leaf_index: LeafIndex, at: Option<::Hash>, ) -> Result::Hash>> { let api = self.client.runtime_api(); diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index d6ef76d01ac3a..7c0dae26b3373 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -25,7 +25,7 @@ benchmarks_instance_pallet! { on_initialize { let x in 1 .. 1_000; - let leaves = x as u64; + let leaves = x as NodeIndex; }: { for b in 0..leaves { Pallet::::on_initialize((b as u32).into()); diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 01bf1b2254f09..12577880c5600 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -70,10 +70,10 @@ mod mock; mod tests; pub use pallet::*; -pub use pallet_mmr_primitives as primitives; +pub use pallet_mmr_primitives::{self as primitives, NodeIndex}; pub trait WeightInfo { - fn on_initialize(peaks: u64) -> Weight; + fn on_initialize(peaks: NodeIndex) -> Weight; } #[frame_support::pallet] @@ -160,7 +160,7 @@ pub mod pallet { /// Current size of the MMR (number of leaves). #[pallet::storage] #[pallet::getter(fn mmr_leaves)] - pub type NumberOfLeaves = StorageValue<_, u64, ValueQuery>; + pub type NumberOfLeaves = StorageValue<_, NodeIndex, ValueQuery>; /// Hashes of the nodes in the MMR. /// @@ -169,7 +169,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn mmr_peak)] pub type Nodes, I: 'static = ()> = - StorageMap<_, Identity, u64, >::Hash, OptionQuery>; + StorageMap<_, Identity, NodeIndex, >::Hash, OptionQuery>; #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { @@ -228,7 +228,7 @@ where } impl, I: 'static> Pallet { - fn offchain_key(pos: u64) -> sp_std::prelude::Vec { + fn offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { (T::INDEXING_PREFIX, pos).encode() } @@ -239,7 +239,7 @@ impl, I: 'static> Pallet { /// all the leaves to be present. /// It may return an error or panic if used incorrectly. 
pub fn generate_proof( - leaf_index: u64, + leaf_index: NodeIndex, ) -> Result<(LeafOf, primitives::Proof<>::Hash>), primitives::Error> { let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); mmr.generate_proof(leaf_index) @@ -263,7 +263,7 @@ impl, I: 'static> Pallet { .log_debug("The proof has incorrect number of leaves or proof items.")) } - let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); + let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); let is_valid = mmr.verify_leaf_proof(leaf, proof)?; if is_valid { Ok(()) diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index d5036e58f432e..a1963275a0cde 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -21,7 +21,7 @@ use crate::{ utils::NodesUtils, Hasher, Node, NodeOf, }, - primitives::{self, Error}, + primitives::{self, Error, NodeIndex}, Config, HashingOf, }; #[cfg(not(feature = "std"))] @@ -60,7 +60,7 @@ where Storage: mmr_lib::MMRStore>, { mmr: mmr_lib::MMR, Hasher, L>, Storage>, - leaves: u64, + leaves: NodeIndex, } impl Mmr @@ -71,7 +71,7 @@ where Storage: mmr_lib::MMRStore>, { /// Create a pointer to an existing MMR with given number of leaves. - pub fn new(leaves: u64) -> Self { + pub fn new(leaves: NodeIndex) -> Self { let size = NodesUtils::new(leaves).size(); Self { mmr: mmr_lib::MMR::new(size, Default::default()), leaves } } @@ -94,7 +94,7 @@ where /// Return the internal size of the MMR (number of nodes). #[cfg(test)] - pub fn size(&self) -> u64 { + pub fn size(&self) -> NodeIndex { self.mmr.mmr_size() } } @@ -109,7 +109,7 @@ where /// Push another item to the MMR. /// /// Returns element position (index) in the MMR. - pub fn push(&mut self, leaf: L) -> Option { + pub fn push(&mut self, leaf: L) -> Option { let position = self.mmr.push(Node::Data(leaf)).map_err(|e| Error::Push.log_error(e)).ok()?; @@ -120,7 +120,7 @@ where /// Commit the changes to underlying storage, return current number of leaves and /// calculate the new MMR's root hash. - pub fn finalize(self) -> Result<(u64, >::Hash), Error> { + pub fn finalize(self) -> Result<(NodeIndex, >::Hash), Error> { let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; Ok((self.leaves, root.hash())) @@ -140,7 +140,7 @@ where /// (i.e. you can't run the function in the pruned storage). pub fn generate_proof( &self, - leaf_index: u64, + leaf_index: NodeIndex, ) -> Result<(L, primitives::Proof<>::Hash>), Error> { let position = mmr_lib::leaf_index_to_pos(leaf_index); let store = >::default(); diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 09e24017816ec..6e4bf91d802fa 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -18,19 +18,24 @@ //! A MMR storage implementations. use codec::Encode; +use frame_support::log; +use mmr_lib::helper; +use sp_io::offchain_index; +use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] -use sp_std::prelude::Vec; +use sp_std::prelude::*; use crate::{ - mmr::{Node, NodeOf}, - primitives, Config, Nodes, NumberOfLeaves, Pallet, + mmr::{utils::NodesUtils, Node, NodeOf}, + primitives::{self, NodeIndex}, + Config, Nodes, NumberOfLeaves, Pallet, }; /// A marker type for runtime-specific storage implementation. /// /// Allows appending new items to the MMR and proof verification. 
/// MMR nodes are appended to two different storages: -/// 1. We add nodes (leaves) hashes to the on-chain storge (see [crate::Nodes]). +/// 1. We add nodes (leaves) hashes to the on-chain storage (see [crate::Nodes]). /// 2. We add full leaves (and all inner nodes as well) into the `IndexingAPI` during block /// processing, so the values end up in the Offchain DB if indexing is enabled. pub struct RuntimeStorage; @@ -60,14 +65,14 @@ where I: 'static, L: primitives::FullLeaf + codec::Decode, { - fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { let key = Pallet::::offchain_key(pos); // Retrieve the element from Off-chain DB. Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } - fn append(&mut self, _: u64, _: Vec>) -> mmr_lib::Result<()> { + fn append(&mut self, _: NodeIndex, _: Vec>) -> mmr_lib::Result<()> { panic!("MMR must not be altered in the off-chain context.") } } @@ -78,32 +83,90 @@ where I: 'static, L: primitives::FullLeaf, { - fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { + fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { Ok(>::get(pos).map(Node::Hash)) } - fn append(&mut self, pos: u64, elems: Vec>) -> mmr_lib::Result<()> { - let mut leaves = crate::NumberOfLeaves::::get(); - let mut size = crate::mmr::utils::NodesUtils::new(leaves).size(); + fn append(&mut self, pos: NodeIndex, elems: Vec>) -> mmr_lib::Result<()> { + if elems.is_empty() { + return Ok(()) + } + + sp_std::if_std! { + log::trace!("elems: {:?}", elems.iter().map(|elem| elem.hash()).collect::>()); + } + + let leaves = NumberOfLeaves::::get(); + let size = NodesUtils::new(leaves).size(); + if pos != size { return Err(mmr_lib::Error::InconsistentStore) } + let new_size = size + elems.len() as NodeIndex; + + // A sorted (ascending) iterator over peak indices to prune and persist. + let (peaks_to_prune, mut peaks_to_store) = peaks_to_prune_and_store(size, new_size); + + // Now we are going to iterate over elements to insert + // and keep track of the current `node_index` and `leaf_index`. + let mut leaf_index = leaves; + let mut node_index = size; + for elem in elems { - // on-chain we only store the hash (even if it's a leaf) - >::insert(size, elem.hash()); - // Indexing API is used to store the full leaf content. - let key = Pallet::::offchain_key(size); - elem.using_encoded(|elem| sp_io::offchain_index::set(&key, elem)); - size += 1; + // Indexing API is used to store the full node content (both leaf and inner). + elem.using_encoded(|elem| { + offchain_index::set(&Pallet::::offchain_key(node_index), elem) + }); + + // On-chain we are going to only store new peaks. + if peaks_to_store.next_if_eq(&node_index).is_some() { + >::insert(node_index, elem.hash()); + } + // Increase the indices. if let Node::Data(..) = elem { - leaves += 1; + leaf_index += 1; } + node_index += 1; } - NumberOfLeaves::::put(leaves); + // Update current number of leaves. + NumberOfLeaves::::put(leaf_index); + + // And remove all remaining items from `peaks_before` collection. + for pos in peaks_to_prune { + >::remove(pos); + } Ok(()) } } + +fn peaks_to_prune_and_store( + old_size: NodeIndex, + new_size: NodeIndex, +) -> (impl Iterator, Peekable>) { + // A sorted (ascending) collection of peak indices before and after insertion. + // both collections may share a common prefix. 
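+ // E.g. going from 3 leaves (size 4, peaks `[2, 3]`) to 4 leaves (size 7,
+ // peaks `[6]`) shares no prefix, so `[2, 3]` get pruned and `[6]` gets
+ // stored; going from 4 to 5 leaves (size 8, peaks `[6, 7]`) shares `[6]`
+ // and only the new peak `[7]` is stored.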
+ let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) }; + let peaks_after = helper::get_peaks(new_size); + sp_std::if_std! { + log::trace!("peaks_before: {:?}", peaks_before); + log::trace!("peaks_after: {:?}", peaks_after); + } + let mut peaks_before = peaks_before.into_iter().peekable(); + let mut peaks_after = peaks_after.into_iter().peekable(); + + // Consume a common prefix between `peaks_before` and `peaks_after`, + // since that's something we will not be touching anyway. + while peaks_before.peek() == peaks_after.peek() { + peaks_before.next(); + peaks_after.next(); + } + + // what's left in both collections is: + // 1. Old peaks to remove from storage + // 2. New peaks to persist in storage + (peaks_before, peaks_after) +} diff --git a/frame/merkle-mountain-range/src/mmr/utils.rs b/frame/merkle-mountain-range/src/mmr/utils.rs index 8fc725f11e72f..77ce0e8ebbb36 100644 --- a/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/frame/merkle-mountain-range/src/mmr/utils.rs @@ -17,29 +17,31 @@ //! Merkle Mountain Range utilities. +use crate::primitives::{LeafIndex, NodeIndex}; + /// MMR nodes & size -related utilities. pub struct NodesUtils { - no_of_leaves: u64, + no_of_leaves: LeafIndex, } impl NodesUtils { /// Create new instance of MMR nodes utilities for given number of leaves. - pub fn new(no_of_leaves: u64) -> Self { + pub fn new(no_of_leaves: LeafIndex) -> Self { Self { no_of_leaves } } /// Calculate number of peaks in the MMR. - pub fn number_of_peaks(&self) -> u64 { - self.number_of_leaves().count_ones() as u64 + pub fn number_of_peaks(&self) -> NodeIndex { + self.number_of_leaves().count_ones() as NodeIndex } /// Return the number of leaves in the MMR. - pub fn number_of_leaves(&self) -> u64 { + pub fn number_of_leaves(&self) -> LeafIndex { self.no_of_leaves } /// Calculate the total size of MMR (number of nodes). - pub fn size(&self) -> u64 { + pub fn size(&self) -> NodeIndex { 2 * self.no_of_leaves - self.number_of_peaks() } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index 50512e9286951..3faf2bfbd9c2f 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::{mock::*, *}; +use crate::{mmr::utils, mock::*, *}; use frame_support::traits::OnInitialize; +use mmr_lib::helper; use pallet_mmr_primitives::{Compact, Proof}; use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, @@ -48,6 +49,12 @@ fn new_block() -> u64 { MMR::on_initialize(number) } +fn peaks_from_leaves_count(leaves_count: NodeIndex) -> Vec { + let size = utils::NodesUtils::new(leaves_count).size(); + + helper::get_peaks(size) +} + pub(crate) fn hex(s: &str) -> H256 { s.parse().unwrap() } @@ -115,10 +122,29 @@ fn should_append_to_mmr_when_on_initialize_is_called() { ext.execute_with(|| { // when new_block(); + + // then + assert_eq!(crate::NumberOfLeaves::::get(), 1); + assert_eq!( + ( + crate::Nodes::::get(0), + crate::Nodes::::get(1), + crate::RootHash::::get(), + ), + ( + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + None, + hex("0x4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0"), + ) + ); + + // when new_block(); // then assert_eq!(crate::NumberOfLeaves::::get(), 2); + let peaks = peaks_from_leaves_count(2); + assert_eq!(peaks, vec![2]); assert_eq!( ( crate::Nodes::::get(0), @@ -128,8 +154,8 @@ fn should_append_to_mmr_when_on_initialize_is_called() { crate::RootHash::::get(), ), ( - Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), - Some(hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705")), + None, + None, Some(hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")), None, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), @@ -166,14 +192,21 @@ fn should_construct_larger_mmr_correctly() { // then assert_eq!(crate::NumberOfLeaves::::get(), 7); + let peaks = peaks_from_leaves_count(7); + assert_eq!(peaks, vec![6, 9, 10]); + for i in (0..=10).filter(|p| !peaks.contains(p)) { + assert!(crate::Nodes::::get(i).is_none()); + } assert_eq!( ( - crate::Nodes::::get(0), + crate::Nodes::::get(6), + crate::Nodes::::get(9), crate::Nodes::::get(10), crate::RootHash::::get(), ), ( - Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + Some(hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252")), + Some(hex("7e4316ae2ebf7c3b6821cb3a46ca8b7a4f9351a9b40fcf014bb0a4fd8e8f29da")), Some(hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c")), hex("e45e25259f7930626431347fa4dd9aae7ac83b4966126d425ca70ab343709d2c"), ) @@ -265,11 +298,7 @@ fn should_verify() { crate::Pallet::::generate_proof(5).unwrap() }); - // Now to verify the proof, we really shouldn't require offchain storage or extension. - // Hence we initialize the storage once again, using different externalities and then - // verify. 
- let mut ext2 = new_test_ext(); - ext2.execute_with(|| { + ext.execute_with(|| { init_chain(7); // then assert_eq!(crate::Pallet::::verify_leaf(leaf, proof5), Ok(())); From 1d8f7bf6de1f447b706a121c83e759da807d3a01 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 23 Nov 2021 17:04:06 +0100 Subject: [PATCH 137/162] [ci] Add ssh token for publishing gh-pages (#10338) * [ci] Added ssh token for publishing gh-pages * changed ssh commands for gh-pages * return github_token back to vault secrets * check simnet-tests-quick without vault secrets * remove vault secrets from simnet jobs --- .gitlab-ci.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index aa275061088d6..8815ab15f640e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -204,6 +204,9 @@ default: GITHUB_PR_TOKEN: vault: cicd/gitlab/parity/GITHUB_PR_TOKEN@kv file: false + GITHUB_TOKEN: + vault: cicd/gitlab/parity/GITHUB_TOKEN@kv + file: false AWS_ACCESS_KEY_ID: vault: cicd/gitlab/$CI_PROJECT_PATH/AWS_ACCESS_KEY_ID@kv file: false @@ -228,9 +231,9 @@ default: GITHUB_RELEASE_TOKEN: vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_RELEASE_TOKEN@kv file: false - GITHUB_TOKEN: - vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_TOKEN@kv - file: false + GITHUB_SSH_PRIV_KEY: + vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_SSH_PRIV_KEY@kv + file: true GITHUB_USER: vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_USER@kv file: false @@ -809,12 +812,14 @@ publish-rustdoc: # Putting spaces at the front and back to ensure we are not matching just any substring, but the # whole space-separated value. - '[[ " ${RUSTDOCS_DEPLOY_REFS} " =~ " ${CI_COMMIT_REF_NAME} " ]] || exit 0' + # setup ssh + - apt-get update && apt-get install -y ssh - rm -rf /tmp/* # Set git config - - rm -rf .git/config + - git config core.sshCommand "ssh -i ${GITHUB_SSH_PRIV_KEY} -F /dev/null -o StrictHostKeyChecking=no" - git config user.email "devops-team@parity.io" - git config user.name "${GITHUB_USER}" - - git config remote.origin.url "https://${GITHUB_TOKEN}@github.com/paritytech/${CI_PROJECT_NAME}.git" + - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" - git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*" - git fetch origin gh-pages # Install `ejs` and generate index.html based on RUSTDOCS_DEPLOY_REFS @@ -909,7 +914,6 @@ simnet-tests: stage: deploy image: docker.io/paritytech/simnet:${SIMNET_REF} <<: *kubernetes-env - <<: *vault-secrets rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never @@ -939,7 +943,6 @@ simnet-tests-quick: stage: deploy image: docker.io/paritytech/simnet:${SIMNET_REF} <<: *kubernetes-env - <<: *vault-secrets <<: *test-refs-no-trigger-prs-only variables: SIMNET_FEATURES: "${SIMNET_FEATURES_PATH}/quick" From 7406442bea0194ffcafc4e8d48d895d4d8d11346 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 24 Nov 2021 10:42:44 +0100 Subject: [PATCH 138/162] [ci] Fix publish-rustdoc job (#10363) --- .gitlab-ci.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8815ab15f640e..0982ca5b4979e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -233,7 +233,7 @@ default: file: false GITHUB_SSH_PRIV_KEY: vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_SSH_PRIV_KEY@kv - file: true + file: false GITHUB_USER: vault: cicd/gitlab/$CI_PROJECT_PATH/GITHUB_USER@kv file: false @@ -814,9 +814,12 @@ 
publish-rustdoc: - '[[ " ${RUSTDOCS_DEPLOY_REFS} " =~ " ${CI_COMMIT_REF_NAME} " ]] || exit 0' # setup ssh - apt-get update && apt-get install -y ssh + - eval $(ssh-agent) + - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} + - mkdir ~/.ssh && touch ~/.ssh/known_hosts + - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts - rm -rf /tmp/* # Set git config - - git config core.sshCommand "ssh -i ${GITHUB_SSH_PRIV_KEY} -F /dev/null -o StrictHostKeyChecking=no" - git config user.email "devops-team@parity.io" - git config user.name "${GITHUB_USER}" - git config remote.origin.url "git@github.com:/paritytech/${CI_PROJECT_NAME}.git" From 3f657a56b3c9bfe14613d213efc6570292ffaf86 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Wed, 24 Nov 2021 14:10:40 +0300 Subject: [PATCH 139/162] Introduce temporary GitLab & GHA benchmarking jobs (#10311) * Introduce temporary bench job * Add runner label * CI: bench GHA * CI: bench GHA * CI: docs * CI: more docs * CI: run on master only Co-authored-by: Denis P --- .github/workflows/bench_gh_gl.yaml | 75 ++++++++++++++++++++++++++++++ .gitlab-ci.yml | 29 ++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 .github/workflows/bench_gh_gl.yaml diff --git a/.github/workflows/bench_gh_gl.yaml b/.github/workflows/bench_gh_gl.yaml new file mode 100644 index 0000000000000..b243f7f661419 --- /dev/null +++ b/.github/workflows/bench_gh_gl.yaml @@ -0,0 +1,75 @@ +# Please do not tamper with this job, it's a part of benchmarking experiment. +# However, you absolutely can copy it into another file and redo the last job in the pipeline. +# Just make sure you won't introduce long queues to the GHA runner, we have just one at the moment. + +name: bench GHA against GitLab + +on: + push: + branches: [ master ] + +jobs: + bench_gh: + runs-on: self-hosted + env: + CARGO_INCREMENTAL: 0 + RUSTUP_HOME: /usr/local/rustup + CARGO_HOME: /usr/local/cargo + CC: clang + CXX: clang + + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.9.1 + with: + access_token: ${{ github.token }} + + - name: Install dependencies + # Template job, one can copy it to another pipeline. + run: | + apt-get update + apt-get install -y --no-install-recommends time clang + update-alternatives --install /usr/bin/cc cc /usr/bin/clang 100 + + - name: Install Rust + # Template job, one can copy it to another pipeline. + # Referance code https://github.com/paritytech/scripts/blob/master/dockerfiles/base-ci-linux/Dockerfile + # Better keep Rust versions here in sync with the CI image, otherwise the results will conflict. 
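+ # The script below bootstraps rustup from the static installer, installs
+ # stable, pins nightly-2021-11-08 and symlinks the pin to `nightly`, so
+ # that `cargo +nightly` resolves to the pinned toolchain.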
+ run: | + curl -L "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init + chmod +x rustup-init + ./rustup-init -y --no-modify-path --profile minimal --default-toolchain stable + rm rustup-init + # add rustup tp PATH so it's usable right away + echo "/usr/local/cargo/bin" >> $GITHUB_PATH + source /usr/local/cargo/env + chmod -R a+w ${RUSTUP_HOME} ${CARGO_HOME} + # install nightly toolchain + rustup toolchain install nightly-2021-11-08 --profile minimal --component rustfmt clippy + # link the pinned toolchain to nightly + ln -s /usr/local/rustup/toolchains/nightly-2021-11-08-x86_64-unknown-linux-gnu /usr/local/rustup/toolchains/nightly-x86_64-unknown-linux-gnu + rustup target add wasm32-unknown-unknown + rustup target add wasm32-unknown-unknown --toolchain nightly + # show versions + rustup show + cargo --version + # remove clutter from the installations + rm -rf "${CARGO_HOME}/registry" "${CARGO_HOME}/git" + + - name: Checkout sources + uses: actions/checkout@v2 + + - name: bench-GHA-test-full-crypto-feature + # GitHub env variables reference: https://docs.github.com/en/actions/learn-github-actions/environment-variables + # The important part of the experiment is the line with `curl`: it sends the job's timing to Prometheus. + run: | + START_TIME=`date '+%s'` + cd primitives/core/ + time cargo +nightly build --verbose --no-default-features --features full_crypto + cd ../application-crypto + time cargo +nightly build --verbose --no-default-features --features full_crypto + END_TIME=`date '+%s'` + TOTAL_TIME=`expr $END_TIME - $START_TIME` + # please remove this line if you want to play with GHA runner. + curl -d "parity_github_job_time{project=\"$GITHUB_REPOSITORY\",job=\"$GITHUB_WORKFLOW\",runner=\"github\"} $TOTAL_TIME" -X POST http://vm-longterm.parity-build.parity.io/api/v1/import/prometheus diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0982ca5b4979e..0b578f19d098b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -538,6 +538,35 @@ test-full-crypto-feature: - time cargo +nightly build --verbose --no-default-features --features full_crypto - sccache -s + +# Mostly same as the job above, additional instrumentation was added to push test run times +# to the time series database. +# This is temporary and will be eventually removed. 
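+# The instrumentation is plain shell: the job takes `date '+%s'` timestamps
+# around the two builds and POSTs the elapsed seconds as a single
+# `parity_gitlab_job_time` sample to the `/api/v1/import/prometheus` endpoint,
+# mirroring the `parity_github_job_time` metric pushed by the GHA twin above.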
+bench-test-full-crypto-feature: + stage: test + <<: *docker-env + <<: *build-refs + variables: + <<: *default-vars + RUSTFLAGS: "-Cdebug-assertions=y" + RUST_BACKTRACE: 1 + before_script: [""] + script: + # disable sccache for the bench purposes + - unset RUSTC_WRAPPER + - START_TIME=`date '+%s'` + - cd primitives/core/ + - time cargo +nightly build --verbose --no-default-features --features full_crypto + - cd ../application-crypto + - time cargo +nightly build --verbose --no-default-features --features full_crypto + - END_TIME=`date '+%s'` + - TOTAL_TIME=`expr $END_TIME - $START_TIME` + # send the job time measuring to the prometheus endpoint + - curl -d "parity_gitlab_job_time{project=\"$CI_PROJECT_PATH\",job=\"$CI_JOB_NAME\",runner=\"gitlab\"} $TOTAL_TIME" -X POST $VM_LONGTERM_URI/api/v1/import/prometheus + tags: + - linux-docker-compare + + test-wasmer-sandbox: stage: test <<: *docker-env From d732a9d82de5df570a8a849b4367365a3e18075c Mon Sep 17 00:00:00 2001 From: Alexander Popiak Date: Wed, 24 Nov 2021 14:39:11 +0100 Subject: [PATCH 140/162] derive Debug for CrateVersion and StorageVersion (#10355) --- frame/support/src/traits/metadata.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index 0da76f7585aca..50fb53fc57063 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -135,7 +135,7 @@ pub trait GetCallMetadata { } /// The version of a crate. -#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Clone, Copy, Default)] +#[derive(Debug, Eq, PartialEq, Encode, Decode, Clone, Copy, Default)] pub struct CrateVersion { /// The major version of the crate. pub major: u16, @@ -175,7 +175,7 @@ pub const STORAGE_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__STORAGE_VERSION__:"; /// /// Each storage version of a pallet is stored in the state under a fixed key. See /// [`STORAGE_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. -#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy, PartialOrd, Default)] +#[derive(Debug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy, PartialOrd, Default)] pub struct StorageVersion(u16); impl StorageVersion { From c519f0f11059e14ab40c868429ba91e7d2525f33 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 24 Nov 2021 16:00:03 +0100 Subject: [PATCH 141/162] [ci] Fix publish-rustdoc (#10364) --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0b578f19d098b..b1987b834fef0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -842,12 +842,12 @@ publish-rustdoc: # whole space-separated value. 
- '[[ " ${RUSTDOCS_DEPLOY_REFS} " =~ " ${CI_COMMIT_REF_NAME} " ]] || exit 0' # setup ssh + # FIXME: add ssh to docker image - apt-get update && apt-get install -y ssh - eval $(ssh-agent) - ssh-add - <<< ${GITHUB_SSH_PRIV_KEY} - mkdir ~/.ssh && touch ~/.ssh/known_hosts - ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts - - rm -rf /tmp/* # Set git config - git config user.email "devops-team@parity.io" - git config user.name "${GITHUB_USER}" From e9fca0f4d7a65998206f29baf0133a7931452104 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 24 Nov 2021 18:20:55 +0100 Subject: [PATCH 142/162] Stabilize "seal1" seal_call (#10366) --- frame/contracts/src/wasm/mod.rs | 9 +++------ frame/contracts/src/wasm/runtime.rs | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 10aa0d19a04f7..6a807710a5265 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -541,11 +541,10 @@ mod tests { } #[test] - #[cfg(feature = "unstable-interface")] fn contract_call_forward_input() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "env" "memory" (memory 1 1)) (func (export "call") @@ -596,11 +595,10 @@ mod tests { } #[test] - #[cfg(feature = "unstable-interface")] fn contract_call_clone_input() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -652,11 +650,10 @@ mod tests { } #[test] - #[cfg(feature = "unstable-interface")] fn contract_call_tail_call() { const CODE: &str = r#" (module - (import "__unstable__" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) (func (export "call") (drop diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 883dfd0802483..204db09ed3e8d 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -928,7 +928,7 @@ define_env!(Env, , // `ReturnCode::BelowSubsistenceThreshold` // `ReturnCode::TransferFailed` // `ReturnCode::NotCallable` - [__unstable__] seal_call( + [seal1] seal_call( ctx, flags: u32, callee_ptr: u32, @@ -940,7 +940,7 @@ define_env!(Env, , output_len_ptr: u32 ) -> ReturnCode => { ctx.call( - CallFlags::from_bits(flags).ok_or_else(|| "used rerved bit in CallFlags")?, + CallFlags::from_bits(flags).ok_or_else(|| "used reserved bit in CallFlags")?, callee_ptr, gas, value_ptr, From 5e2bb785dd8858c12b2c7f85f59d143507bd9819 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 24 Nov 2021 20:46:28 +0100 Subject: [PATCH 143/162] Enable wasmtime for aarch64 as well (#10367) --- bin/node/cli/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 
1bb3671d42bae..42df74100415d 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -96,7 +96,7 @@ frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../.. node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } -[target.'cfg(target_arch="x86_64")'.dependencies] +[target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies] node-executor = { version = "3.0.0-dev", path = "../executor", features = [ "wasmtime", ] } From 3009d322dd2f4ab019afa7b54baa02ff91be0191 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 25 Nov 2021 09:30:07 +0100 Subject: [PATCH 144/162] Don't send ForceClose repeatedly in send_sync_notification (#10348) --- .../src/protocol/notifications/handler.rs | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index a0c49fa592b21..db0385bea8f69 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -365,10 +365,12 @@ struct NotificationsSinkInner { /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. async_channel: FuturesMutex>, /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// Contains `None` if the channel was full at some point, in which case the channel will + /// be closed in the near future anyway. /// This channel has a large capacity and is meant to be used in contexts where /// back-pressure cannot be properly exerted. /// It will be removed in a future version. - sync_channel: Mutex>, + sync_channel: Mutex>>, } /// Message emitted through the [`NotificationsSink`] and processed by the background task @@ -400,14 +402,20 @@ impl NotificationsSink { /// This method will be removed in a future version. pub fn send_sync_notification<'a>(&'a self, message: impl Into>) { let mut lock = self.inner.sync_channel.lock(); - let result = - lock.try_send(NotificationsSinkMessage::Notification { message: message.into() }); - - if result.is_err() { - // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the - // buffer, and therefore `try_send` will succeed. - let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); - debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + + if let Some(tx) = lock.as_mut() { + let result = + tx.try_send(NotificationsSinkMessage::Notification { message: message.into() }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore `try_send` will succeed. + let _result2 = tx.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + + // Destroy the sender in order to not send more `ForceClose` messages. 
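+ // (Subsequent `send_sync_notification` calls will then find `None` here
+ // and return without doing anything, rather than queueing further
+ // `ForceClose` messages.)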
+ *lock = None; + } } } @@ -554,7 +562,7 @@ impl ProtocolsHandler for NotifsHandler { inner: Arc::new(NotificationsSinkInner { peer_id: self.peer_id, async_channel: FuturesMutex::new(async_tx), - sync_channel: Mutex::new(sync_tx), + sync_channel: Mutex::new(Some(sync_tx)), }), }; From 4617267aa82f63c0082592c4cd0f912bbc52b0ef Mon Sep 17 00:00:00 2001 From: Roman Date: Thu, 25 Nov 2021 11:33:33 +0300 Subject: [PATCH 145/162] Bump libp2p to 0.40.0 (#10035) * Bump libp2p to 0.40.0-rc.1 * Fix PingFailure import * Reduce the number of compilation errors (this is a FIXME commit) * Bump libp2p to 0.40.0-rc.2 * Fix sc-network::Behaviour to inject events into fields * Fix some NetworkBehaviourAction types * More fixes * More fixes * More fixes * Fix DiscoveryBehaviour * Fix PeerInfoBehaviour * Fix RequestResponsesBehaviour * Fix RequestResponsesBehaviour * Fix Notifications * Fix NetworkWorker * Fix Behaviour * Please borrowchk * Please borrowchk * Please borrowchk * Fix fmt * Cover all cases in matches * Fix some clippy warnings * Fix into_peer_id -> to_peer_id * Fix some warnings * Fix some inject_dial_failure FIXMEs * Fix DiscoveryBehaviour::inject_dial_failure * Fix RequestResponsesBehaviour::inject_dial_failure * Fix the order of inject_connection_closed PeerInfoBehaviour events * Make KademliaEvent with filtering unreachable * Fix Notifications::inject_dial_failure * Use concurrent_dial_errors in NetworkWorker * Remove commented-out RequestResponsesBehaviour::inject_addr_reach_failure * Fix tests * Dont report new PendingConnectionError and DialError variants to metrics * Bump libp2p to 0.40.0 * Add fn inject_listen_failure and inject_address_change * Review fixes --- Cargo.lock | 388 ++++++++---------- client/authority-discovery/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/cli/src/commands/build_spec_cmd.rs | 2 +- client/cli/src/commands/generate_node_key.rs | 2 +- client/cli/src/commands/inspect_node_key.rs | 2 +- client/consensus/common/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 4 +- client/network/src/behaviour.rs | 12 +- client/network/src/bitswap.rs | 14 +- client/network/src/discovery.rs | 135 ++++-- client/network/src/peer_info.rs | 98 +++-- client/network/src/protocol.rs | 36 +- .../src/protocol/notifications/behaviour.rs | 133 +++--- .../src/protocol/notifications/tests.rs | 38 +- client/network/src/request_responses.rs | 89 ++-- client/network/src/service.rs | 89 ++-- client/network/test/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- 21 files changed, 564 insertions(+), 492 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0091223b09e6a..3e956ea462ee7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -545,6 +545,12 @@ dependencies = [ "sp-std", ] +[[package]] +name = "bimap" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" + [[package]] name = "bincode" version = "1.3.2" @@ -1592,6 +1598,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +[[package]] +name = "dtoa" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" + [[package]] name = "dyn-clonable" version = "0.9.0" @@ -1874,12 +1886,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = 
"fixedbitset" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - [[package]] name = "fixedbitset" version = "0.4.0" @@ -3388,9 +3394,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.39.1" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9004c06878ef8f3b4b4067e69a140d87ed20bf777287f82223e49713b36ee433" +checksum = "3bec54343492ba5940a6c555e512c6721139835d28c59bc22febece72dfd0d9d" dependencies = [ "atomic", "bytes 1.0.1", @@ -3404,12 +3410,14 @@ dependencies = [ "libp2p-identify", "libp2p-kad", "libp2p-mdns", + "libp2p-metrics", "libp2p-mplex", "libp2p-noise", "libp2p-ping", "libp2p-plaintext", "libp2p-pnet", "libp2p-relay", + "libp2p-rendezvous", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", @@ -3427,9 +3435,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59" +checksum = "bef22d9bba1e8bcb7ec300073e6802943fe8abb8190431842262b5f1c30abba1" dependencies = [ "asn1_der", "bs58", @@ -3439,16 +3447,16 @@ dependencies = [ "futures 0.3.16", "futures-timer 3.0.2", "lazy_static", - "libsecp256k1 0.5.0", + "libsecp256k1", "log 0.4.14", "multiaddr", "multihash 0.14.0", "multistream-select", "parking_lot 0.11.1", "pin-project 1.0.8", - "prost 0.8.0", - "prost-build 0.8.0", - "rand 0.7.3", + "prost", + "prost-build", + "rand 0.8.4", "ring", "rw-stream-sink", "sha2 0.9.8", @@ -3461,9 +3469,9 @@ dependencies = [ [[package]] name = "libp2p-deflate" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66097fccc0b7f8579f90a03ea76ba6196332ea049fd07fd969490a06819dcdc8" +checksum = "51a800adb195f33de63f4b17b63fe64cfc23bf2c6a0d3d0d5321328664e65197" dependencies = [ "flate2", "futures 0.3.16", @@ -3472,9 +3480,9 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ff08b3196b85a17f202d80589e93b1660a574af67275706657fdc762e42c32" +checksum = "bb8f89d15cb6e3c5bc22afff7513b11bab7856f2872d3cfba86f7f63a06bc498" dependencies = [ "async-std-resolver", "futures 0.3.16", @@ -3486,9 +3494,9 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "404eca8720967179dac7a5b4275eb91f904a53859c69ca8d018560ad6beb214f" +checksum = "aab3d7210901ea51b7bae2b581aa34521797af8c4ec738c980bda4a06434067f" dependencies = [ "cuckoofilter", "fnv", @@ -3496,17 +3504,17 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + "prost", + "prost-build", "rand 0.7.3", "smallvec 1.7.0", ] [[package]] name = "libp2p-gossipsub" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1cc48709bcbc3a3321f08a73560b4bbb4166a7d56f6fdb615bc775f4f91058e" +checksum = "dfeead619eb5dac46e65acc78c535a60aaec803d1428cca6407c3a4fc74d698d" dependencies = [ "asynchronous-codec 0.6.0", "base64 0.13.0", @@ -3518,8 +3526,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + 
"prost", + "prost-build", "rand 0.7.3", "regex", "sha2 0.9.8", @@ -3530,25 +3538,26 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7b61f6cf07664fb97016c318c4d4512b3dd4cc07238607f3f0163245f99008e" +checksum = "cca1275574183f288ff8b72d535d5ffa5ea9292ef7829af8b47dcb197c7b0dcd" dependencies = [ "futures 0.3.16", "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + "lru 0.6.6", + "prost", + "prost-build", "smallvec 1.7.0", "wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50ed78489c87924235665a0ab345b298ee34dff0f7ad62c0ba6608b2144fb75e" +checksum = "a2297dc0ca285f3a09d1368bde02449e539b46f94d32d53233f53f6625bcd3ba" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec 0.6.0", @@ -3559,8 +3568,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + "prost", + "prost-build", "rand 0.7.3", "sha2 0.9.8", "smallvec 1.7.0", @@ -3572,9 +3581,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a29e6cbc2a24b8471b6567e580a0e8e7b70a6d0f0ea2be0844d1e842d7d4fa33" +checksum = "14c864b64bdc8a84ff3910a0df88e6535f256191a450870f1e7e10cbf8e64d45" dependencies = [ "async-io", "data-encoding", @@ -3591,11 +3600,25 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-metrics" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4af432fcdd2f8ba4579b846489f8f0812cfd738ced2c0af39df9b1c48bbb6ab2" +dependencies = [ + "libp2p-core", + "libp2p-identify", + "libp2p-kad", + "libp2p-ping", + "libp2p-swarm", + "open-metrics-client", +] + [[package]] name = "libp2p-mplex" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "313d9ea526c68df4425f580024e67a9d3ffd49f2c33de5154b1f5019816f7a99" +checksum = "7f2cd64ef597f40e14bfce0497f50ecb63dd6d201c61796daeb4227078834fbf" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", @@ -3611,9 +3634,9 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1db7212f342b6ba7c981cc40e31f76e9e56cb48e65fa4c142ecaca5839523e" +checksum = "a8772c7a99088221bb7ca9c5c0574bf55046a7ab4c319f3619b275f28c8fb87a" dependencies = [ "bytes 1.0.1", "curve25519-dalek 3.0.2", @@ -3621,8 +3644,8 @@ dependencies = [ "lazy_static", "libp2p-core", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + "prost", + "prost-build", "rand 0.8.4", "sha2 0.9.8", "snow", @@ -3633,9 +3656,9 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2482cfd9eb0b7a0baaf3e7b329dc4f2785181a161b1a47b7192f8d758f54a439" +checksum = "80ef7b0ec5cf06530d9eb6cf59ae49d46a2c45663bde31c25a12f682664adbcf" dependencies = [ "futures 0.3.16", "libp2p-core", @@ -3648,26 +3671,26 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13b4783e5423870b9a5c199f65a7a3bc66d86ab56b2b9beebf3c338d889cf8e4" +checksum = 
"5fba1a6ff33e4a274c89a3b1d78b9f34f32af13265cc5c46c16938262d4e945a" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.16", "libp2p-core", "log 0.4.14", - "prost 0.8.0", - "prost-build 0.8.0", + "prost", + "prost-build", "unsigned-varint 0.7.0", "void", ] [[package]] name = "libp2p-pnet" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07cb4dd4b917e5b40ddefe49b96b07adcd8d342e0317011d175b7b2bb1dcc974" +checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ "futures 0.3.16", "log 0.4.14", @@ -3679,9 +3702,9 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0133f6cfd81cdc16e716de2982e012c62e6b9d4f12e41967b3ee361051c622aa" +checksum = "2852b61c90fa8ce3c8fcc2aba76e6cefc20d648f9df29157d6b3a916278ef3e3" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", @@ -3691,8 +3714,8 @@ dependencies = [ "libp2p-swarm", "log 0.4.14", "pin-project 1.0.8", - "prost 0.8.0", - "prost-build 0.8.0", + "prost", + "prost-build", "rand 0.7.3", "smallvec 1.7.0", "unsigned-varint 0.7.0", @@ -3700,11 +3723,33 @@ dependencies = [ "wasm-timer", ] +[[package]] +name = "libp2p-rendezvous" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14a6d2b9e7677eff61dc3d2854876aaf3976d84a01ef6664b610c77a0c9407c5" +dependencies = [ + "asynchronous-codec 0.6.0", + "bimap", + "futures 0.3.16", + "libp2p-core", + "libp2p-swarm", + "log 0.4.14", + "prost", + "prost-build", + "rand 0.8.4", + "sha2 0.9.8", + "thiserror", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + [[package]] name = "libp2p-request-response" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cdae44b6821466123af93cbcdec7c9e6ba9534a8af9cdc296446d39416d241" +checksum = "a877a4ced6d46bf84677e1974e8cf61fb434af73b2e96fb48d6cb6223a4634d8" dependencies = [ "async-trait", "bytes 1.0.1", @@ -3712,8 +3757,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "lru 0.6.6", - "minicbor", + "lru 0.7.0", "rand 0.7.3", "smallvec 1.7.0", "unsigned-varint 0.7.0", @@ -3722,9 +3766,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7083861341e1555467863b4cd802bea1e8c4787c0f7b5110097d0f1f3248f9a9" +checksum = "3f5184a508f223bc100a12665517773fb8730e9f36fc09eefb670bf01b107ae9" dependencies = [ "either", "futures 0.3.16", @@ -3738,9 +3782,9 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab8cb308d4fc854869f5abb54fdab0833d2cf670d407c745849dc47e6e08d79c" +checksum = "072c290f727d39bdc4e9d6d1c847978693d25a673bd757813681e33e5f6c00c2" dependencies = [ "quote", "syn", @@ -3748,9 +3792,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79edd26b6b4bb5feee210dcda562dca186940dfecb0024b979c3f50824b3bf28" +checksum = "7399c5b6361ef525d41c11fcf51635724f832baf5819b30d3d873eabb4fbae4b" dependencies = [ "async-io", "futures 0.3.16", @@ -3765,9 +3809,9 @@ dependencies = [ [[package]] name = "libp2p-uds" -version = "0.29.0" +version = "0.30.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "280e793440dd4e9f273d714f4497325c72cddb0fe85a49f9a03c88f41dd20182" +checksum = "b8b7563e46218165dfd60f64b96f7ce84590d75f53ecbdc74a7dd01450dc5973" dependencies = [ "async-std", "futures 0.3.16", @@ -3777,9 +3821,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f553b7140fad3d7a76f50497b0ea591e26737d9607428a75509fc191e4d1b1f6" +checksum = "1008a302b73c5020251f9708c653f5ed08368e530e247cc9cd2f109ff30042cf" dependencies = [ "futures 0.3.16", "js-sys", @@ -3791,9 +3835,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddf99dcbf5063e9d59087f61b1e85c686ceab2f5abedb472d32288065c0e5e27" +checksum = "22e12df82d1ed64969371a9e65ea92b91064658604cc2576c2757f18ead9a1cf" dependencies = [ "either", "futures 0.3.16", @@ -3802,16 +3846,16 @@ dependencies = [ "log 0.4.14", "quicksink", "rw-stream-sink", - "soketto 0.4.2", + "soketto 0.7.0", "url 2.2.1", "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "214cc0dd9c37cbed27f0bb1eba0c41bbafdb93a8be5e9d6ae1e6b4b42cd044bf" +checksum = "4e7362abb8867d7187e7e93df17f460d554c997fc5c8ac57dc1259057f6889af" dependencies = [ "futures 0.3.16", "libp2p-core", @@ -3832,25 +3876,6 @@ dependencies = [ "libc", ] -[[package]] -name = "libsecp256k1" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" -dependencies = [ - "arrayref", - "base64 0.12.3", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core 0.2.2", - "libsecp256k1-gen-ecmult 0.2.1", - "libsecp256k1-gen-genmult 0.2.1", - "rand 0.7.3", - "serde", - "sha2 0.9.8", - "typenum", -] - [[package]] name = "libsecp256k1" version = "0.7.0" @@ -3861,26 +3886,15 @@ dependencies = [ "base64 0.13.0", "digest 0.9.0", "hmac-drbg", - "libsecp256k1-core 0.3.0", - "libsecp256k1-gen-ecmult 0.3.0", - "libsecp256k1-gen-genmult 0.3.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", "rand 0.8.4", "serde", "sha2 0.9.8", "typenum", ] -[[package]] -name = "libsecp256k1-core" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - [[package]] name = "libsecp256k1-core" version = "0.3.0" @@ -3892,31 +3906,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" -dependencies = [ - "libsecp256k1-core 0.2.2", -] - [[package]] name = "libsecp256k1-gen-ecmult" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" dependencies = [ - "libsecp256k1-core 0.3.0", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" -dependencies = [ - "libsecp256k1-core 
0.2.2", + "libsecp256k1-core", ] [[package]] @@ -3925,7 +3921,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" dependencies = [ - "libsecp256k1-core 0.3.0", + "libsecp256k1-core", ] [[package]] @@ -4205,26 +4201,6 @@ dependencies = [ "log 0.3.9", ] -[[package]] -name = "minicbor" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9" -dependencies = [ - "minicbor-derive", -] - -[[package]] -name = "minicbor-derive" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "miniz_oxide" version = "0.4.4" @@ -5067,6 +5043,29 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "open-metrics-client" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7337d80c23c2d8b1349563981bc4fb531220733743ba8115454a67b181173f0d" +dependencies = [ + "dtoa", + "itoa", + "open-metrics-client-derive-text-encode", + "owning_ref", +] + +[[package]] +name = "open-metrics-client-derive-text-encode" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c83b586f00268c619c1cb3340ec1a6f59dd9ba1d9833a273a68e6d5cd8ffc" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl" version = "0.10.35" @@ -5345,7 +5344,7 @@ dependencies = [ "frame-system", "hex", "hex-literal", - "libsecp256k1 0.7.0", + "libsecp256k1", "log 0.4.14", "pallet-beefy", "pallet-mmr", @@ -5405,7 +5404,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "libsecp256k1 0.7.0", + "libsecp256k1", "log 0.4.14", "pallet-balances", "pallet-contracts-primitives", @@ -6557,23 +6556,13 @@ dependencies = [ "sha-1 0.8.2", ] -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset 0.2.0", - "indexmap", -] - [[package]] name = "petgraph" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset 0.4.0", + "fixedbitset", "indexmap", ] @@ -6847,16 +6836,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "prost" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" -dependencies = [ - "bytes 1.0.1", - "prost-derive 0.8.0", -] - [[package]] name = "prost" version = "0.9.0" @@ -6864,25 +6843,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes 1.0.1", - "prost-derive 0.9.0", -] - -[[package]] -name = "prost-build" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" -dependencies = [ - "bytes 1.0.1", - "heck", - "itertools", - "log 0.4.14", - "multimap", 
- "petgraph 0.5.1", - "prost 0.8.0", - "prost-types 0.8.0", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -6897,27 +6858,14 @@ dependencies = [ "lazy_static", "log 0.4.14", "multimap", - "petgraph 0.6.0", - "prost 0.9.0", - "prost-types 0.9.0", + "petgraph", + "prost", + "prost-types", "regex", "tempfile", "which", ] -[[package]] -name = "prost-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.9.0" @@ -6931,16 +6879,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" -dependencies = [ - "bytes 1.0.1", - "prost 0.8.0", -] - [[package]] name = "prost-types" version = "0.9.0" @@ -6948,7 +6886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes 1.0.1", - "prost 0.9.0", + "prost", ] [[package]] @@ -7594,9 +7532,9 @@ checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" [[package]] name = "salsa20" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" +checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686" dependencies = [ "cipher", ] @@ -7632,8 +7570,8 @@ dependencies = [ "libp2p", "log 0.4.14", "parity-scale-codec", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8068,7 +8006,7 @@ version = "0.10.0-dev" dependencies = [ "hex-literal", "lazy_static", - "libsecp256k1 0.7.0", + "libsecp256k1", "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", @@ -8282,8 +8220,8 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.8", - "prost 0.9.0", - "prost-build 0.9.0", + "prost", + "prost-build", "quickcheck", "rand 0.7.3", "sc-block-builder", @@ -9159,7 +9097,6 @@ checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" dependencies = [ "base64 0.12.3", "bytes 0.5.6", - "flate2", "futures 0.3.16", "httparse", "log 0.4.14", @@ -9175,6 +9112,7 @@ checksum = "083624472e8817d44d02c0e55df043737ff11f279af924abdf93845717c2b75c" dependencies = [ "base64 0.13.0", "bytes 1.0.1", + "flate2", "futures 0.3.16", "httparse", "log 0.4.14", @@ -9442,7 +9380,7 @@ dependencies = [ "hex-literal", "impl-serde", "lazy_static", - "libsecp256k1 0.7.0", + "libsecp256k1", "log 0.4.14", "merlin", "num-traits", @@ -9562,7 +9500,7 @@ version = "4.0.0-dev" dependencies = [ "futures 0.3.16", "hash-db", - "libsecp256k1 0.7.0", + "libsecp256k1", "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index b6f9b8450d861..f3169e82fb3c8 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -23,7 +23,7 @@ derive_more = "0.99.16" futures = "0.3.9" futures-timer = "3.0.1" ip_network = "0.4.0" -libp2p = { version = "0.39.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.40.0", default-features = false, features = ["kad"] } log = 
"0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.10.0-dev" } prost = "0.9" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 8667a5e876c14..dddd48cb2f142 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ regex = "1.5.4" tokio = { version = "1.13", features = [ "signal", "rt-multi-thread" ] } futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.39.1" +libp2p = "0.40.0" parity-scale-codec = "2.3.1" hex = "0.4.2" rand = "0.7.3" diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 75fdf07643ee2..5e43a26a1e67d 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -65,7 +65,7 @@ impl BuildSpecCmd { if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { let keys = network_config.node_key.into_keypair()?; - let peer_id = keys.public().into_peer_id(); + let peer_id = keys.public().to_peer_id(); let addr = MultiaddrWithPeerId { multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], peer_id, diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index 74a4197f36621..cc26bb73240ac 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -42,7 +42,7 @@ impl GenerateNodeKeyCmd { pub fn run(&self) -> Result<(), Error> { let keypair = libp2p_ed25519::Keypair::generate(); let secret = keypair.secret(); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + let peer_id = PublicKey::Ed25519(keypair.public()).to_peer_id(); let secret_hex = hex::encode(secret.as_ref()); match &self.file { diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index 92a71f8975052..7527fbe4315ae 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -47,7 +47,7 @@ impl InspectNodeKeyCmd { ed25519::SecretKey::from_bytes(&mut file_content).map_err(|_| "Bad node key file")?; let keypair = ed25519::Keypair::from(secret); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + let peer_id = PublicKey::Ed25519(keypair.public()).to_peer_id(); println!("{}", peer_id); diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index c26e250edf24c..308028c85d23c 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.30" -libp2p = { version = "0.39.1", default-features = false } +libp2p = { version = "0.40.0", default-features = false } log = "0.4.8" futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index e11cb8dbe85d6..840cb50c8c9fc 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.39.1", default-features = false } +libp2p = { version = "0.40.0", default-features = false } log = "0.4.8" lru = "0.7.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 527a18dc7755b..8ee4d7f145726 100644 --- 
a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -64,11 +64,11 @@ unsigned-varint = { version = "0.6.0", features = [ ] } void = "1.0.2" zeroize = "1.4.2" -libp2p = "0.39.1" +libp2p = "0.40.0" [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.39.1", default-features = false } +libp2p = { version = "0.40.0", default-features = false } quickcheck = "1.0.3" rand = "0.7.2" sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index e2b950cf67e8c..a6a67d7bce2d1 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -32,7 +32,10 @@ use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, identify::IdentifyInfo, kad::record, - swarm::{toggle::Toggle, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, + swarm::{ + toggle::Toggle, NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, + PollParameters, + }, NetworkBehaviour, }; use log::debug; @@ -58,7 +61,7 @@ pub use crate::request_responses::{ /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut", poll_method = "poll")] +#[behaviour(out_event = "BehaviourOut", poll_method = "poll", event_process = true)] pub struct Behaviour { /// All the substrate-specific protocols. substrate: Protocol, @@ -512,11 +515,12 @@ impl NetworkBehaviourEventProcess for Behaviour { } impl Behaviour { - fn poll( + fn poll( &mut self, _cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll>> { + ) -> Poll, ::ProtocolsHandler>> + { if let Some(event) = self.events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) } diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs index 6b53dce626505..e6cb1d9d79e3a 100644 --- a/client/network/src/bitswap.rs +++ b/client/network/src/bitswap.rs @@ -39,8 +39,7 @@ use libp2p::{ UpgradeInfo, }, swarm::{ - IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, - OneShotHandler, PollParameters, ProtocolsHandler, + NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, PollParameters, }, }; use log::{debug, error, trace}; @@ -297,12 +296,11 @@ impl NetworkBehaviour for Bitswap { self.ready_blocks.push_back((peer, response)); } - fn poll(&mut self, _ctx: &mut Context, _: &mut impl PollParameters) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent, - >, - >{ + fn poll( + &mut self, + _ctx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { if let Some((peer_id, message)) = self.ready_blocks.pop_front() { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 2a4b25a621e04..dc08ab57ed3f6 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -67,8 +67,8 @@ use libp2p::{ mdns::{Mdns, MdnsConfig, MdnsEvent}, multiaddr::Protocol, swarm::{ - protocols_handler::multi::IntoMultiHandler, IntoProtocolsHandler, NetworkBehaviour, - NetworkBehaviourAction, PollParameters, ProtocolsHandler, + protocols_handler::multi::IntoMultiHandler, DialError, IntoProtocolsHandler, + NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, }, }; use log::{debug, error, info, trace, warn}; @@ -107,7 +107,7 @@ impl DiscoveryConfig { /// Create a default configuration with the given public key. 
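    /// The local `PeerId` is derived from this key via `PublicKey::to_peer_id` (the libp2p 0.40 rename of `into_peer_id`).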
pub fn new(local_public_key: PublicKey) -> Self { Self { - local_peer_id: local_public_key.into_peer_id(), + local_peer_id: local_public_key.to_peer_id(), permanent_addresses: Vec::new(), dht_random_walk: true, allow_private_ipv4: true, @@ -428,6 +428,29 @@ impl DiscoveryBehaviour { }; ip.is_global() } + + fn new_handler_with_replacement( + &mut self, + pid: ProtocolId, + handler: KademliaHandlerProto, + ) -> ::ProtocolsHandler { + let mut handlers: HashMap<_, _> = self + .kademlias + .iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))) + .collect(); + + if let Some(h) = handlers.get_mut(&pid) { + *h = handler + } + + IntoMultiHandler::try_from_iter(handlers).expect( + "There can be at most one handler per `ProtocolId` and protocol names contain the \ + `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ + only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ + qed", + ) + } } /// Event generated by the `DiscoveryBehaviour`. @@ -527,15 +550,34 @@ impl NetworkBehaviour for DiscoveryBehaviour { list } + fn inject_address_change( + &mut self, + peer_id: &PeerId, + connection_id: &ConnectionId, + old: &ConnectedPoint, + new: &ConnectedPoint, + ) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_address_change(k, peer_id, connection_id, old, new); + } + } + fn inject_connection_established( &mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + failed_addresses: Option<&Vec>, ) { self.num_connections += 1; for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) + NetworkBehaviour::inject_connection_established( + k, + peer_id, + conn, + endpoint, + failed_addresses, + ) } } @@ -547,14 +589,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { fn inject_connection_closed( &mut self, - peer_id: &PeerId, - conn: &ConnectionId, - endpoint: &ConnectedPoint, + _peer_id: &PeerId, + _conn: &ConnectionId, + _endpoint: &ConnectedPoint, + _handler: ::Handler, ) { self.num_connections -= 1; - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) - } + // NetworkBehaviour::inject_connection_closed on Kademlia does nothing. 
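The next hunk forwards Kademlia's `DialAddress`/`DialPeer` actions together with a replacement multi-handler, reflecting the libp2p 0.40 API in which every dial request carries the `ProtocolsHandler` that will drive the resulting connection. A rough sketch of that pattern follows (illustrative only; `MyBehaviour` and its `events` queue are assumptions, mirroring how `Notifications` emits dials further down):

    use libp2p::swarm::{DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction};
    use libp2p::PeerId;

    impl MyBehaviour {
        fn request_dial(&mut self, peer_id: PeerId) {
            // libp2p 0.40: the dial action must include a handler for the
            // connection it may open; `new_handler` is the NetworkBehaviour
            // trait method, so the trait must be in scope.
            let handler = self.new_handler();
            self.events.push_back(NetworkBehaviourAction::DialPeer {
                peer_id,
                condition: DialPeerCondition::Disconnected,
                handler,
            });
        }
    }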
} fn inject_disconnected(&mut self, peer_id: &PeerId) { @@ -563,20 +604,25 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_addr_reach_failure( + fn inject_dial_failure( &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, + peer_id: Option, + _: Self::ProtocolsHandler, + error: &DialError, ) { if let Some(peer_id) = peer_id { - if let Some(list) = self.ephemeral_addresses.get_mut(peer_id) { - list.retain(|a| a != addr); + if let DialError::Transport(errors) = error { + if let Some(list) = self.ephemeral_addresses.get_mut(&peer_id) { + for (addr, _error) in errors { + list.retain(|a| a != addr); + } + } } } for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) + let handler = k.new_handler(); + NetworkBehaviour::inject_dial_failure(k, peer_id, handler, error); } } @@ -631,12 +677,6 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_dial_failure(k, peer_id) - } - } - fn inject_new_listener(&mut self, id: ListenerId) { for k in self.kademlias.values_mut() { NetworkBehaviour::inject_new_listener(k, id) @@ -649,6 +689,10 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } + fn inject_listen_failure(&mut self, _: &Multiaddr, _: &Multiaddr, _: Self::ProtocolsHandler) { + // NetworkBehaviour::inject_listen_failure on Kademlia does nothing. + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { for k in self.kademlias.values_mut() { NetworkBehaviour::inject_listener_error(k, id, err) @@ -665,12 +709,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent, - >, - >{ + ) -> Poll> { // Immediately process the content of `discovered`. if let Some(ev) = self.pending_events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) @@ -731,6 +770,10 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = DiscoveryOut::Discovered(peer); return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, + KademliaEvent::InboundPutRecordRequest { .. } | + KademliaEvent::InboundAddProviderRequest { .. } => { + debug_assert!(false, "We don't use kad filtering at the moment"); + }, KademliaEvent::PendingRoutablePeer { .. } | KademliaEvent::InboundRequestServed { .. } => { // We are not interested in this event at the moment. 
@@ -847,10 +890,20 @@ impl NetworkBehaviour for DiscoveryBehaviour { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) }, }, - NetworkBehaviourAction::DialAddress { address } => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id, condition } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::DialAddress { address, handler } => { + let pid = pid.clone(); + let handler = self.new_handler_with_replacement(pid, handler); + return Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) + }, + NetworkBehaviourAction::DialPeer { peer_id, condition, handler } => { + let pid = pid.clone(); + let handler = self.new_handler_with_replacement(pid, handler); + return Poll::Ready(NetworkBehaviourAction::DialPeer { + peer_id, + condition, + handler, + }) + }, NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, @@ -888,10 +941,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, MdnsEvent::Expired(_) => {}, }, - NetworkBehaviourAction::DialAddress { address } => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id, condition } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::DialAddress { .. } => { + unreachable!("mDNS never dials!"); + }, + NetworkBehaviourAction::DialPeer { .. } => { + unreachable!("mDNS never dials!"); + }, NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { @@ -940,7 +995,7 @@ impl MdnsWrapper { &mut self, cx: &mut Context<'_>, params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll::ProtocolsHandler>> { loop { match self { Self::Instantiating(fut) => @@ -1007,13 +1062,13 @@ mod tests { config.finish() }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let mut swarm = Swarm::new(transport, behaviour, keypair.public().to_peer_id()); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); if i == 0 { first_swarm_peer_id_and_addr = - Some((keypair.public().into_peer_id(), listen_addr.clone())) + Some((keypair.public().to_peer_id(), listen_addr.clone())) } swarm.listen_on(listen_addr.clone()).unwrap(); diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index 141cc59247d1a..2c37fdb460a76 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -186,6 +186,17 @@ impl NetworkBehaviour for PeerInfoBehaviour { list } + fn inject_address_change( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + old: &ConnectedPoint, + new: &ConnectedPoint, + ) { + self.ping.inject_address_change(peer_id, conn, old, new); + self.identify.inject_address_change(peer_id, conn, old, new); + } + fn inject_connected(&mut self, peer_id: &PeerId) { self.ping.inject_connected(peer_id); self.identify.inject_connected(peer_id); @@ -196,9 +207,12 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + failed_addresses: Option<&Vec>, ) { - self.ping.inject_connection_established(peer_id, conn, endpoint); - self.identify.inject_connection_established(peer_id, conn, endpoint); + 
self.ping + .inject_connection_established(peer_id, conn, endpoint, failed_addresses); + self.identify + .inject_connection_established(peer_id, conn, endpoint, failed_addresses); match self.nodes_info.entry(*peer_id) { Entry::Vacant(e) => { e.insert(NodeInfo::new(endpoint.clone())); @@ -220,9 +234,12 @@ impl NetworkBehaviour for PeerInfoBehaviour { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + handler: ::Handler, ) { - self.ping.inject_connection_closed(peer_id, conn, endpoint); - self.identify.inject_connection_closed(peer_id, conn, endpoint); + let (ping_handler, identity_handler) = handler.into_inner(); + self.identify + .inject_connection_closed(peer_id, conn, endpoint, identity_handler); + self.ping.inject_connection_closed(peer_id, conn, endpoint, ping_handler); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.endpoints.retain(|ep| ep != endpoint) @@ -256,19 +273,15 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn inject_addr_reach_failure( + fn inject_dial_failure( &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, + peer_id: Option, + handler: Self::ProtocolsHandler, + error: &libp2p::swarm::DialError, ) { - self.ping.inject_addr_reach_failure(peer_id, addr, error); - self.identify.inject_addr_reach_failure(peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.ping.inject_dial_failure(peer_id); - self.identify.inject_dial_failure(peer_id); + let (ping_handler, identity_handler) = handler.into_inner(); + self.identify.inject_dial_failure(peer_id, identity_handler, error); + self.ping.inject_dial_failure(peer_id, ping_handler, error); } fn inject_new_listener(&mut self, id: ListenerId) { @@ -296,6 +309,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_expired_external_addr(addr); } + fn inject_listen_failure( + &mut self, + local_addr: &Multiaddr, + send_back_addr: &Multiaddr, + handler: Self::ProtocolsHandler, + ) { + let (ping_handler, identity_handler) = handler.into_inner(); + self.identify + .inject_listen_failure(local_addr, send_back_addr, identity_handler); + self.ping.inject_listen_failure(local_addr, send_back_addr, ping_handler); + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { self.ping.inject_listener_error(id, err); self.identify.inject_listener_error(id, err); @@ -309,13 +334,8 @@ impl NetworkBehaviour for PeerInfoBehaviour { fn poll( &mut self, cx: &mut Context, - params: &mut impl PollParameters - ) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent - > - >{ + params: &mut impl PollParameters, + ) -> Poll> { loop { match self.ping.poll(cx, params) { Poll::Pending => break, @@ -324,10 +344,20 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.handle_ping_report(&peer, rtt) } }, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) => { + let handler = + IntoProtocolsHandler::select(handler, self.identify.new_handler()); + return Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) + }, + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, handler }) => { + let handler = + 
IntoProtocolsHandler::select(handler, self.identify.new_handler()); + return Poll::Ready(NetworkBehaviourAction::DialPeer { + peer_id, + condition, + handler, + }) + }, Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, @@ -362,10 +392,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { IdentifyEvent::Pushed { .. } => {}, IdentifyEvent::Sent { .. } => {}, }, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) => { + let handler = IntoProtocolsHandler::select(self.ping.new_handler(), handler); + return Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) + }, + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, handler }) => { + let handler = IntoProtocolsHandler::select(self.ping.new_handler(), handler); + return Poll::Ready(NetworkBehaviourAction::DialPeer { + peer_id, + condition, + handler, + }) + }, Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index a0c52d14fa62f..bfaf42d5ff227 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1362,8 +1362,10 @@ impl NetworkBehaviour for Protocol { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + failed_addresses: Option<&Vec>, ) { - self.behaviour.inject_connection_established(peer_id, conn, endpoint) + self.behaviour + .inject_connection_established(peer_id, conn, endpoint, failed_addresses) } fn inject_connection_closed( @@ -1371,8 +1373,9 @@ impl NetworkBehaviour for Protocol { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + handler: ::Handler, ) { - self.behaviour.inject_connection_closed(peer_id, conn, endpoint) + self.behaviour.inject_connection_closed(peer_id, conn, endpoint, handler) } fn inject_connected(&mut self, peer_id: &PeerId) { @@ -1396,12 +1399,7 @@ impl NetworkBehaviour for Protocol { &mut self, cx: &mut std::task::Context, params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent - > - >{ + ) -> Poll> { if let Some(message) = self.pending_messages.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } @@ -1562,10 +1560,10 @@ impl NetworkBehaviour for Protocol { let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }), + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, handler }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, handler }), 
Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, @@ -1778,17 +1776,13 @@ impl NetworkBehaviour for Protocol { Poll::Pending } - fn inject_addr_reach_failure( + fn inject_dial_failure( &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, + peer_id: Option, + handler: Self::ProtocolsHandler, + error: &libp2p::swarm::DialError, ) { - self.behaviour.inject_addr_reach_failure(peer_id, addr, error) - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.behaviour.inject_dial_failure(peer_id) + self.behaviour.inject_dial_failure(peer_id, handler, error); } fn inject_new_listener(&mut self, id: ListenerId) { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index f66f1fbe9e95a..26a246f57690f 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -26,7 +26,8 @@ use futures::prelude::*; use libp2p::{ core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, swarm::{ - DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, + DialError, DialPeerCondition, IntoProtocolsHandler, NetworkBehaviour, + NetworkBehaviourAction, NotifyHandler, PollParameters, }, }; use log::{error, trace, warn}; @@ -38,7 +39,7 @@ use std::{ borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}, - error, mem, + mem, pin::Pin, str, sync::Arc, @@ -132,7 +133,7 @@ pub struct Notifications { next_incoming_index: sc_peerset::IncomingIndex, /// Events to produce from `poll()`. - events: VecDeque>, + events: VecDeque>, } /// Configuration for a notifications protocol. @@ -628,6 +629,7 @@ impl Notifications { /// Function that is called when the peerset wants us to connect to a peer. fn peerset_report_connect(&mut self, peer_id: PeerId, set_id: sc_peerset::SetId) { // If `PeerId` is unknown to us, insert an entry, start dialing, and return early. 
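+        // libp2p 0.40 requires every dial request to carry a handler; create it up front so either `DialPeer` branch below can use it.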
+ let handler = self.new_handler(); let mut occ_entry = match self.peers.entry((peer_id, set_id)) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { @@ -643,6 +645,7 @@ impl Notifications { self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: entry.key().0.clone(), condition: DialPeerCondition::Disconnected, + handler, }); entry.insert(PeerState::Requested); return @@ -679,6 +682,7 @@ impl Notifications { self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: occ_entry.key().0.clone(), condition: DialPeerCondition::Disconnected, + handler, }); *occ_entry.into_mut() = PeerState::Requested; }, @@ -1094,6 +1098,7 @@ impl NetworkBehaviour for Notifications { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + _failed_addresses: Option<&Vec>, ) { for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { match self.peers.entry((*peer_id, set_id)).or_insert(PeerState::Poisoned) { @@ -1152,6 +1157,7 @@ impl NetworkBehaviour for Notifications { peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint, + _handler: ::Handler, ) { for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { let mut entry = if let Entry::Occupied(entry) = self.peers.entry((*peer_id, set_id)) { @@ -1411,70 +1417,74 @@ impl NetworkBehaviour for Notifications { fn inject_disconnected(&mut self, _peer_id: &PeerId) {} - fn inject_addr_reach_failure( + fn inject_dial_failure( &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn error::Error, + peer_id: Option, + _: Self::ProtocolsHandler, + error: &DialError, ) { - trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); - } + if let DialError::Transport(errors) = error { + for (addr, error) in errors.iter() { + trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); + } + } - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + if let Some(peer_id) = peer_id { + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { - if let Entry::Occupied(mut entry) = self.peers.entry((peer_id.clone(), set_id)) { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The peer is not in our list. - st @ PeerState::Backoff { .. } => { - *entry.into_mut() = st; - }, - - // "Basic" situation: we failed to reach a peer that the peerset requested. - st @ PeerState::Requested | st @ PeerState::PendingRequest { .. } => { - trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, *peer_id, DropReason::Unknown); + for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { + if let Entry::Occupied(mut entry) = self.peers.entry((peer_id.clone(), set_id)) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The peer is not in our list. + st @ PeerState::Backoff { .. } => { + *entry.into_mut() = st; + }, - let now = Instant::now(); - let ban_duration = match st { - PeerState::PendingRequest { timer_deadline, .. } - if timer_deadline > now => - cmp::max(timer_deadline - now, Duration::from_secs(5)), - _ => Duration::from_secs(5), - }; + // "Basic" situation: we failed to reach a peer that the peerset requested. + st @ PeerState::Requested | st @ PeerState::PendingRequest { .. 
} => { + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + self.peerset.dropped(set_id, peer_id, DropReason::Unknown); - let delay_id = self.next_delay_id; - self.next_delay_id.0 += 1; - let delay = futures_timer::Delay::new(ban_duration); - let peer_id = *peer_id; - self.delays.push( - async move { - delay.await; - (delay_id, peer_id, set_id) - } - .boxed(), - ); - - *entry.into_mut() = PeerState::Backoff { - timer: delay_id, - timer_deadline: now + ban_duration, - }; - }, + let now = Instant::now(); + let ban_duration = match st { + PeerState::PendingRequest { timer_deadline, .. } + if timer_deadline > now => + cmp::max(timer_deadline - now, Duration::from_secs(5)), + _ => Duration::from_secs(5), + }; - // We can still get dial failures even if we are already connected to the peer, - // as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | - st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | - st @ PeerState::Incoming { .. } => { - *entry.into_mut() = st; - }, + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(ban_duration); + let peer_id = peer_id; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); - PeerState::Poisoned => { - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); - debug_assert!(false); - }, + *entry.into_mut() = PeerState::Backoff { + timer: delay_id, + timer_deadline: now + ban_duration, + }; + }, + + // We can still get dial failures even if we are already connected to the + // peer, as an extra diagnostic for an earlier attempt. + st @ PeerState::Disabled { .. } | + st @ PeerState::Enabled { .. } | + st @ PeerState::DisabledPendingEnable { .. } | + st @ PeerState::Incoming { .. 
} => { + *entry.into_mut() = st; + }, + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id); + debug_assert!(false); + }, + } } } } @@ -2000,7 +2010,7 @@ impl NetworkBehaviour for Notifications { &mut self, cx: &mut Context, _params: &mut impl PollParameters, - ) -> Poll> { + ) -> Poll> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event) } @@ -2032,6 +2042,8 @@ impl NetworkBehaviour for Notifications { while let Poll::Ready(Some((delay_id, peer_id, set_id))) = Pin::new(&mut self.delays).poll_next(cx) { + let handler = self.new_handler(); + let peer_state = match self.peers.get_mut(&(peer_id, set_id)) { Some(s) => s, // We intentionally never remove elements from `delays`, and it may @@ -2051,6 +2063,7 @@ impl NetworkBehaviour for Notifications { self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id, condition: DialPeerCondition::Disconnected, + handler, }); *peer_state = PeerState::Requested; }, diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index 0b3ffc01a4b8d..ffc25194def09 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -29,7 +29,7 @@ use libp2p::{ }, identity, noise, swarm::{ - IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, + DialError, IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, Swarm, SwarmEvent, }, yamux, Multiaddr, PeerId, Transport, @@ -68,7 +68,7 @@ fn build_nodes() -> (Swarm, Swarm) { in_peers: 25, out_peers: 25, bootnodes: if index == 0 { - keypairs.iter().skip(1).map(|keypair| keypair.public().into_peer_id()).collect() + keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect() } else { vec![] }, @@ -92,7 +92,7 @@ fn build_nodes() -> (Swarm, Swarm) { .enumerate() .filter_map(|(n, a)| { if n != index { - Some((keypairs[n].public().into_peer_id(), a.clone())) + Some((keypairs[n].public().to_peer_id(), a.clone())) } else { None } @@ -100,7 +100,7 @@ fn build_nodes() -> (Swarm, Swarm) { .collect(), }; - let mut swarm = Swarm::new(transport, behaviour, keypairs[index].public().into_peer_id()); + let mut swarm = Swarm::new(transport, behaviour, keypairs[index].public().to_peer_id()); swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -163,8 +163,10 @@ impl NetworkBehaviour for CustomProtoWithAddr { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + failed_addresses: Option<&Vec>, ) { - self.inner.inject_connection_established(peer_id, conn, endpoint) + self.inner + .inject_connection_established(peer_id, conn, endpoint, failed_addresses) } fn inject_connection_closed( @@ -172,8 +174,9 @@ impl NetworkBehaviour for CustomProtoWithAddr { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + handler: ::Handler, ) { - self.inner.inject_connection_closed(peer_id, conn, endpoint) + self.inner.inject_connection_closed(peer_id, conn, endpoint, handler) } fn inject_event( @@ -188,27 +191,18 @@ impl NetworkBehaviour for CustomProtoWithAddr { fn poll( &mut self, cx: &mut Context, - params: &mut impl PollParameters - ) -> Poll< - NetworkBehaviourAction< - <::Handler as ProtocolsHandler>::InEvent, - Self::OutEvent - > - >{ + params: &mut impl PollParameters, + ) -> Poll> { self.inner.poll(cx, params) } - fn inject_addr_reach_failure( + fn inject_dial_failure( &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - 
error: &dyn std::error::Error, + peer_id: Option, + handler: Self::ProtocolsHandler, + error: &DialError, ) { - self.inner.inject_addr_reach_failure(peer_id, addr, error) - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.inner.inject_dial_failure(peer_id) + self.inner.inject_dial_failure(peer_id, handler, error) } fn inject_new_listener(&mut self, id: ListenerId) { diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 0908d7510e359..aa3e1701eee52 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -45,12 +45,12 @@ use libp2p::{ ConnectedPoint, Multiaddr, PeerId, }, request_response::{ - ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig, - RequestResponseEvent, RequestResponseMessage, ResponseChannel, + handler::RequestResponseHandler, ProtocolSupport, RequestResponse, RequestResponseCodec, + RequestResponseConfig, RequestResponseEvent, RequestResponseMessage, ResponseChannel, }, swarm::{ - protocols_handler::multi::MultiHandler, NetworkBehaviour, NetworkBehaviourAction, - PollParameters, ProtocolsHandler, + protocols_handler::multi::MultiHandler, IntoProtocolsHandler, NetworkBehaviour, + NetworkBehaviourAction, PollParameters, ProtocolsHandler, }, }; use std::{ @@ -377,6 +377,27 @@ impl RequestResponsesBehaviour { }; } } + + fn new_handler_with_replacement( + &mut self, + protocol: String, + handler: RequestResponseHandler, + ) -> ::ProtocolsHandler { + let mut handlers: HashMap<_, _> = self + .protocols + .iter_mut() + .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))) + .collect(); + + if let Some(h) = handlers.get_mut(&protocol) { + *h = handler + } + + MultiHandler::try_from_iter(handlers).expect( + "Protocols are in a HashMap and there can be at most one handler per protocol name, \ + which is the only possible error; qed", + ) + } } impl NetworkBehaviour for RequestResponsesBehaviour { @@ -405,9 +426,16 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + failed_addresses: Option<&Vec>, ) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_connection_established(p, peer_id, conn, endpoint) + NetworkBehaviour::inject_connection_established( + p, + peer_id, + conn, + endpoint, + failed_addresses, + ) } } @@ -422,9 +450,11 @@ impl NetworkBehaviour for RequestResponsesBehaviour { peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint, + _handler: ::Handler, ) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint) + let handler = p.new_handler(); + NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint, handler); } } @@ -434,17 +464,6 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_addr_reach_failure( - &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error, - ) { - for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_addr_reach_failure(p, peer_id, addr, error) - } - } - fn inject_event( &mut self, peer_id: PeerId, @@ -478,9 +497,15 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_dial_failure(&mut self, peer_id: &PeerId) { + fn inject_dial_failure( + &mut self, + peer_id: Option, + _: Self::ProtocolsHandler, + error: &libp2p::swarm::DialError, + ) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_dial_failure(p, peer_id) + 
let handler = p.new_handler(); + NetworkBehaviour::inject_dial_failure(p, peer_id, handler, error) } } @@ -512,12 +537,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { &mut self, cx: &mut Context, params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - ::InEvent, - Self::OutEvent, - >, - > { + ) -> Poll> { 'poll_all: loop { if let Some(message_request) = self.message_request.take() { // Now we can can poll `MessageRequest` until we get the reputation @@ -658,17 +678,26 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Other events generated by the underlying behaviour are transparently // passed through. - NetworkBehaviourAction::DialAddress { address } => { + NetworkBehaviourAction::DialAddress { address, handler } => { log::error!( "The request-response isn't supposed to start dialing peers" ); - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + let protocol = protocol.to_string(); + let handler = self.new_handler_with_replacement(protocol, handler); + return Poll::Ready(NetworkBehaviourAction::DialAddress { + address, + handler, + }) }, - NetworkBehaviourAction::DialPeer { peer_id, condition } => + NetworkBehaviourAction::DialPeer { peer_id, condition, handler } => { + let protocol = protocol.to_string(); + let handler = self.new_handler_with_replacement(protocol, handler); return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, - }), + handler, + }) + }, NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, @@ -1061,7 +1090,7 @@ mod tests { let behaviour = RequestResponsesBehaviour::new(list, handle).unwrap(); - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let mut swarm = Swarm::new(transport, behaviour, keypair.public().to_peer_id()); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); swarm.listen_on(listen_addr.clone()).unwrap(); diff --git a/client/network/src/service.rs b/client/network/src/service.rs index caf4db89f653a..3dadf810c6b49 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -56,10 +56,10 @@ use libp2p::{ }, kad::record, multiaddr, - ping::handler::PingFailure, + ping::Failure as PingFailure, swarm::{ - protocols_handler::NodeHandlerWrapperError, AddressScore, NetworkBehaviour, SwarmBuilder, - SwarmEvent, + protocols_handler::NodeHandlerWrapperError, AddressScore, DialError, NetworkBehaviour, + SwarmBuilder, SwarmEvent, }, Multiaddr, PeerId, }; @@ -176,7 +176,7 @@ impl NetworkWorker { // Private and public keys configuration. 
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); - let local_peer_id = local_public.clone().into_peer_id(); + let local_peer_id = local_public.clone().to_peer_id(); info!( target: "sub-libp2p", "🏷 Local node identity is: {}", @@ -1845,8 +1845,13 @@ impl Future for NetworkWorker { peer_id, endpoint, num_established, + concurrent_dial_errors, }) => { - debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + if let Some(errors) = concurrent_dial_errors { + debug!(target: "sub-libp2p", "Libp2p => Connected({:?}) with errors: {:?}", peer_id, errors); + } else { + debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + } if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { @@ -1914,37 +1919,41 @@ impl Future for NetworkWorker { metrics.listeners_local_addresses.dec(); } }, - Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error, .. }) => { - trace!( - target: "sub-libp2p", - "Libp2p => Failed to reach {:?} through {:?}: {}", - peer_id, address, error, - ); + Poll::Ready(SwarmEvent::OutgoingConnectionError { peer_id, error }) => { + if let Some(peer_id) = peer_id { + trace!( + target: "sub-libp2p", + "Libp2p => Failed to reach {:?}: {}", + peer_id, error, + ); - if this.boot_node_ids.contains(&peer_id) { - if let PendingConnectionError::InvalidPeerId = error { - error!( - "💔 The bootnode you want to connect to at `{}` provided a different peer ID than the one you expect: `{}`.", - address, peer_id, - ); + if this.boot_node_ids.contains(&peer_id) { + if let DialError::InvalidPeerId = error { + error!( + "💔 The bootnode you want to connect provided a different peer ID than the one you expect: `{}`.", + peer_id, + ); + } } } if let Some(metrics) = this.metrics.as_ref() { - match error { - PendingConnectionError::ConnectionLimit(_) => metrics - .pending_connections_errors_total - .with_label_values(&["limit-reached"]) - .inc(), - PendingConnectionError::InvalidPeerId => metrics - .pending_connections_errors_total - .with_label_values(&["invalid-peer-id"]) - .inc(), - PendingConnectionError::Transport(_) | - PendingConnectionError::IO(_) => metrics + let reason = match error { + DialError::ConnectionLimit(_) => Some("limit-reached"), + DialError::InvalidPeerId => Some("invalid-peer-id"), + DialError::Transport(_) | DialError::ConnectionIo(_) => + Some("transport-error"), + DialError::Banned | + DialError::LocalPeerId | + DialError::NoAddresses | + DialError::DialPeerConditionFalse(_) | + DialError::Aborted => None, // ignore them + }; + if let Some(reason) = reason { + metrics .pending_connections_errors_total - .with_label_values(&["transport-error"]) - .inc(), + .with_label_values(&[reason]) + .inc(); } } }, @@ -1970,16 +1979,19 @@ impl Future for NetworkWorker { ); if let Some(metrics) = this.metrics.as_ref() { let reason = match error { - PendingConnectionError::ConnectionLimit(_) => "limit-reached", - PendingConnectionError::InvalidPeerId => "invalid-peer-id", + PendingConnectionError::ConnectionLimit(_) => Some("limit-reached"), + PendingConnectionError::InvalidPeerId => Some("invalid-peer-id"), PendingConnectionError::Transport(_) | - PendingConnectionError::IO(_) => "transport-error", + PendingConnectionError::IO(_) => Some("transport-error"), + PendingConnectionError::Aborted => None, // ignore it }; - metrics - .incoming_connections_errors_total - .with_label_values(&[reason]) - .inc(); + if let Some(reason) = reason { + metrics + 
.incoming_connections_errors_total + .with_label_values(&[reason]) + .inc(); + } } }, Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { @@ -1995,9 +2007,6 @@ impl Future for NetworkWorker { .inc(); } }, - Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => { - trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", address, error) - }, Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses, .. }) => { if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 539c57fe4cb91..84aab96a673d6 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -20,7 +20,7 @@ parking_lot = "0.11.1" futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.39.1", default-features = false } +libp2p = { version = "0.40.0", default-features = false } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index fba499bce00e3..ec226b3d9bee2 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" -libp2p = { version = "0.39.1", default-features = false } +libp2p = { version = "0.40.0", default-features = false } sc-utils = { version = "4.0.0-dev", path = "../utils"} log = "0.4.8" serde_json = "1.0.71" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 9165f6f87af68..73efdbc039c51 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.11.1" futures = "0.3.9" wasm-timer = "0.2.5" -libp2p = { version = "0.39.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.40.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "1.0.8" rand = "0.7.2" From 5714e30cd280906a8f20e699b187fdef22989ffc Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 25 Nov 2021 02:24:45 -0800 Subject: [PATCH 146/162] REAL fix for `min-square` for `--steps=1 --repeats=1` (#10323) (#10369) * Revert "frame-benchmarking: Fix `min-square` for `--steps=1` (#10323)" This reverts commit fe7c02941122bbe4a7956aff53739b62f575240d. 
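The reverted guard only looked at `components.len()`, so a run with `--steps=1 --repeats=1` (too few observations) could still reach the least-squares solver, which is underdetermined on a single data point. As a rough sketch of the fallback the follow-up change installs, degenerate inputs should short-circuit to a median estimate; the `BenchmarkResult` below is a simplified stand-in for illustration, not the pallet's actual type:

```rust
/// Simplified stand-in for the benchmarking result type (illustrative only).
struct BenchmarkResult {
    components: Vec<(char, u32)>, // (component name, component value)
    extrinsic_time: u128,         // measured execution time
}

/// With no components to regress over, or with two or fewer observations,
/// a least-squares fit is underdetermined, so fall back to the median.
fn estimate(results: &[BenchmarkResult]) -> Option<u128> {
    if results.first()?.components.is_empty() || results.len() <= 2 {
        let mut times: Vec<u128> =
            results.iter().map(|r| r.extrinsic_time).collect();
        times.sort_unstable();
        return Some(times[times.len() / 2]);
    }
    // Enough data points: run the actual regression (elided in this sketch).
    None
}

fn main() {
    let lone = BenchmarkResult { components: vec![('x', 1)], extrinsic_time: 42 };
    // A single observation falls back to the median, i.e. the value itself.
    assert_eq!(estimate(&[lone]), Some(42));
}
```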
* actual fix
---
 frame/benchmarking/src/analysis.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs
index 6e0ffd23ee988..f655e8fa76320 100644
--- a/frame/benchmarking/src/analysis.rs
+++ b/frame/benchmarking/src/analysis.rs
@@ -199,7 +199,7 @@ impl Analysis {
 	}
 
 	pub fn min_squares_iqr(r: &Vec<BenchmarkResult>, selector: BenchmarkSelector) -> Option<Self> {
-		if r[0].components.len() <= 1 {
+		if r[0].components.is_empty() || r.len() <= 2 {
 			return Self::median_value(r, selector)
 		}
 
From 120bd65e3ee8113f83bf77843ed239daf8cdbbfd Mon Sep 17 00:00:00 2001
From: Andreas Doerr
Date: Thu, 25 Nov 2021 11:26:10 +0100
Subject: [PATCH 147/162] `BEEFY` add tests for rounds (#10328)

* new_rounds()
* WIP
* test add_vote()
* test drop()
* learn to spell
* go get some coffee
* cargo fmt
* lump everything together again
---
 Cargo.lock                         |   1 +
 client/beefy/Cargo.toml            |   1 +
 client/beefy/src/gossip.rs         | 178 ++++++++++++++++++-
 client/beefy/src/gossip_tests.rs   | 182 -------------------
 client/beefy/src/keystore.rs       | 269 +++++++++++++++++++++++++++-
 client/beefy/src/keystore_tests.rs | 275 -----------------------------
 client/beefy/src/round.rs          | 151 ++++++++++++++++
 7 files changed, 592 insertions(+), 465 deletions(-)
 delete mode 100644 client/beefy/src/gossip_tests.rs
 delete mode 100644 client/beefy/src/keystore_tests.rs

diff --git a/Cargo.lock b/Cargo.lock
index 3e956ea462ee7..08d39c1d2b3de 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -494,6 +494,7 @@ dependencies = [
  "sp-core",
  "sp-keystore",
  "sp-runtime",
+ "sp-tracing",
  "strum",
  "substrate-prometheus-endpoint",
  "thiserror",
diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml
index 96e5bc4ffbf31..23c9294d35100 100644
--- a/client/beefy/Cargo.toml
+++ b/client/beefy/Cargo.toml
@@ -35,6 +35,7 @@ sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" }
 beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" }
 
 [dev-dependencies]
+sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" }
 sc-network-test = { version = "0.8.0", path = "../network/test" }
 strum = { version = "0.22", features = ["derive"] }
 
diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/gossip.rs
index d0199964b6ebf..8a43b5a039478 100644
--- a/client/beefy/src/gossip.rs
+++ b/client/beefy/src/gossip.rs
@@ -35,10 +35,6 @@ use beefy_primitives::{
 
 use crate::keystore::BeefyKeystore;
 
-#[cfg(test)]
-#[path = "gossip_tests.rs"]
-mod tests;
-
 // Limit BEEFY gossip by keeping only a bound number of voting rounds alive.
const MAX_LIVE_GOSSIP_ROUNDS: usize = 3; @@ -234,3 +230,177 @@ where }) } } + +#[cfg(test)] +mod tests { + use sc_keystore::LocalKeystore; + use sc_network_test::Block; + use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + + use beefy_primitives::{crypto::Signature, Commitment, MmrRootHash, VoteMessage, KEY_TYPE}; + + use crate::keystore::{tests::Keyring, BeefyKeystore}; + + use super::*; + + #[test] + fn note_round_works() { + let gv = GossipValidator::::new(); + + gv.note_round(1u64); + + let live = gv.known_votes.read(); + assert!(GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(!GossipValidator::::is_live(&live, &1u64)); + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); + } + + #[test] + fn keeps_most_recent_max_rounds() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + gv.note_round(1u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(!GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(23u64); + gv.note_round(15u64); + gv.note_round(20u64); + gv.note_round(2u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &15u64)); + assert!(GossipValidator::::is_live(&live, &20u64)); + assert!(GossipValidator::::is_live(&live, &23u64)); + } + + #[test] + fn note_same_round_twice() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + drop(live); + + // note round #7 again -> should not change anything + gv.note_round(7u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); + } + + struct TestContext; + impl ValidatorContext for TestContext { + fn broadcast_topic(&mut self, _topic: B::Hash, _force: bool) { + todo!() + } + + fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) { + todo!() + } + + fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec) { + todo!() + } + + fn send_topic(&mut self, _who: &sc_network::PeerId, _topic: B::Hash, _force: bool) { + todo!() + } + } + + fn sign_commitment( + who: &Keyring, + commitment: &Commitment, + ) -> Signature { + let store: SyncCryptoStorePtr = std::sync::Arc::new(LocalKeystore::in_memory()); + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&who.to_seed())).unwrap(); + let beefy_keystore: BeefyKeystore = Some(store).into(); + + beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() + } + + #[test] + fn should_avoid_verifying_signatures_twice() { + let gv = GossipValidator::::new(); + let sender = sc_network::PeerId::random(); + let mut context = TestContext; + + let commitment = Commitment { + payload: MmrRootHash::default(), + block_number: 3_u64, + validator_set_id: 0, + }; + + let signature = sign_commitment(&Keyring::Alice, &commitment); + + let vote = VoteMessage { 
commitment, id: Keyring::Alice.public(), signature }; + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + // first time the cache should be populated. + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + assert_eq!( + gv.known_votes.read().get(&vote.commitment.block_number).map(|x| x.len()), + Some(1) + ); + + // second time we should hit the cache + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + + // next we should quickly reject if the round is not live. + gv.note_round(11_u64); + gv.note_round(12_u64); + + assert!(!GossipValidator::::is_live( + &*gv.known_votes.read(), + &vote.commitment.block_number + )); + + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::Discard)); + } +} diff --git a/client/beefy/src/gossip_tests.rs b/client/beefy/src/gossip_tests.rs deleted file mode 100644 index 2d46b873cb7b0..0000000000000 --- a/client/beefy/src/gossip_tests.rs +++ /dev/null @@ -1,182 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
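The `note_round` tests added above pin down the gossip liveness window: only the `MAX_LIVE_GOSSIP_ROUNDS` most recent rounds stay live, and noting a round older than the current window has no lasting effect. A self-contained sketch of that pruning rule, with a plain `BTreeMap` and `Vec<String>` standing in for the validator's real vote set (not the actual gossip code):

```rust
use std::collections::BTreeMap;

const MAX_LIVE_GOSSIP_ROUNDS: usize = 3;

/// Insert `round`, then prune until only the most recent rounds remain live.
fn note_round(live: &mut BTreeMap<u64, Vec<String>>, round: u64) {
    live.entry(round).or_default();
    while live.len() > MAX_LIVE_GOSSIP_ROUNDS {
        // BTreeMap keys are sorted, so the first key is the oldest round.
        let oldest = *live.keys().next().expect("len > MAX_LIVE_GOSSIP_ROUNDS; qed");
        live.remove(&oldest);
    }
}

fn main() {
    let mut live = BTreeMap::new();
    for round in [3, 7, 10, 1] {
        note_round(&mut live, round);
    }
    // Round 1 is older than the three live rounds, so it was pruned on insert,
    // matching the `keeps_most_recent_max_rounds` test above.
    assert_eq!(live.keys().copied().collect::<Vec<_>>(), vec![3, 7, 10]);
}
```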
- -use sc_keystore::LocalKeystore; -use sc_network_test::Block; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; - -use beefy_primitives::{crypto::Signature, Commitment, MmrRootHash, VoteMessage, KEY_TYPE}; - -use crate::keystore::{tests::Keyring, BeefyKeystore}; - -use super::*; - -#[test] -fn note_round_works() { - let gv = GossipValidator::::new(); - - gv.note_round(1u64); - - let live = gv.known_votes.read(); - assert!(GossipValidator::::is_live(&live, &1u64)); - - drop(live); - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - - let live = gv.known_votes.read(); - - assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); - - assert!(!GossipValidator::::is_live(&live, &1u64)); - assert!(GossipValidator::::is_live(&live, &3u64)); - assert!(GossipValidator::::is_live(&live, &7u64)); - assert!(GossipValidator::::is_live(&live, &10u64)); -} - -#[test] -fn keeps_most_recent_max_rounds() { - let gv = GossipValidator::::new(); - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - gv.note_round(1u64); - - let live = gv.known_votes.read(); - - assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); - - assert!(GossipValidator::::is_live(&live, &3u64)); - assert!(!GossipValidator::::is_live(&live, &1u64)); - - drop(live); - - gv.note_round(23u64); - gv.note_round(15u64); - gv.note_round(20u64); - gv.note_round(2u64); - - let live = gv.known_votes.read(); - - assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); - - assert!(GossipValidator::::is_live(&live, &15u64)); - assert!(GossipValidator::::is_live(&live, &20u64)); - assert!(GossipValidator::::is_live(&live, &23u64)); -} - -#[test] -fn note_same_round_twice() { - let gv = GossipValidator::::new(); - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - - let live = gv.known_votes.read(); - - assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); - - drop(live); - - // note round #7 again -> should not change anything - gv.note_round(7u64); - - let live = gv.known_votes.read(); - - assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); - - assert!(GossipValidator::::is_live(&live, &3u64)); - assert!(GossipValidator::::is_live(&live, &7u64)); - assert!(GossipValidator::::is_live(&live, &10u64)); -} - -struct TestContext; -impl ValidatorContext for TestContext { - fn broadcast_topic(&mut self, _topic: B::Hash, _force: bool) { - todo!() - } - - fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) { - todo!() - } - - fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec) { - todo!() - } - - fn send_topic(&mut self, _who: &sc_network::PeerId, _topic: B::Hash, _force: bool) { - todo!() - } -} - -fn sign_commitment( - who: &Keyring, - commitment: &Commitment, -) -> Signature { - let store: SyncCryptoStorePtr = std::sync::Arc::new(LocalKeystore::in_memory()); - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&who.to_seed())).unwrap(); - let beefy_keystore: BeefyKeystore = Some(store).into(); - - beefy_keystore.sign(&who.public(), &commitment.encode()).unwrap() -} - -#[test] -fn should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(); - let sender = sc_network::PeerId::random(); - let mut context = TestContext; - - let commitment = - Commitment { payload: MmrRootHash::default(), block_number: 3_u64, validator_set_id: 0 }; - - let signature = sign_commitment(&Keyring::Alice, &commitment); - - let vote = VoteMessage { commitment, id: Keyring::Alice.public(), signature }; - - gv.note_round(3u64); - gv.note_round(7u64); - gv.note_round(10u64); - - // 
first time the cache should be populated. - let res = gv.validate(&mut context, &sender, &vote.encode()); - - assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); - assert_eq!(gv.known_votes.read().get(&vote.commitment.block_number).map(|x| x.len()), Some(1)); - - // second time we should hit the cache - let res = gv.validate(&mut context, &sender, &vote.encode()); - - assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); - - // next we should quickly reject if the round is not live. - gv.note_round(11_u64); - gv.note_round(12_u64); - - assert!(!GossipValidator::::is_live( - &*gv.known_votes.read(), - &vote.commitment.block_number - )); - - let res = gv.validate(&mut context, &sender, &vote.encode()); - - assert!(matches!(res, ValidationResult::Discard)); -} diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs index 88618b8a5a140..7ee1ceb46bc35 100644 --- a/client/beefy/src/keystore.rs +++ b/client/beefy/src/keystore.rs @@ -31,10 +31,6 @@ use beefy_primitives::{ use crate::error; -#[cfg(test)] -#[path = "keystore_tests.rs"] -pub mod tests; - /// A BEEFY specific keystore implemented as a `Newtype`. This is basically a /// wrapper around [`sp_keystore::SyncCryptoStore`] and allows to customize /// common cryptographic functionality. @@ -117,3 +113,268 @@ impl From> for BeefyKeystore { BeefyKeystore(store) } } + +#[cfg(test)] +pub mod tests { + use std::sync::Arc; + + use sc_keystore::LocalKeystore; + use sp_core::{ecdsa, keccak_256, Pair}; + use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + + use beefy_primitives::{crypto, KEY_TYPE}; + + use super::BeefyKeystore; + use crate::error::Error; + + /// Set of test accounts using [`beefy_primitives::crypto`] types. + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display, strum::EnumIter)] + pub(crate) enum Keyring { + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, + } + + impl Keyring { + /// Sign `msg`. + pub fn sign(self, msg: &[u8]) -> crypto::Signature { + let msg = keccak_256(msg); + ecdsa::Pair::from(self).sign_prehashed(&msg).into() + } + + /// Return key pair. + pub fn pair(self) -> crypto::Pair { + ecdsa::Pair::from_string(self.to_seed().as_str(), None).unwrap().into() + } + + /// Return public key. + pub fn public(self) -> crypto::Public { + self.pair().public() + } + + /// Return seed string. 
+ pub fn to_seed(self) -> String { + format!("//{}", self) + } + } + + impl From for crypto::Pair { + fn from(k: Keyring) -> Self { + k.pair() + } + } + + impl From for ecdsa::Pair { + fn from(k: Keyring) -> Self { + k.pair().into() + } + } + + fn keystore() -> SyncCryptoStorePtr { + Arc::new(LocalKeystore::in_memory()) + } + + #[test] + fn verify_should_work() { + let msg = keccak_256(b"I am Alice!"); + let sig = Keyring::Alice.sign(b"I am Alice!"); + + assert!(ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Alice.public().into(), + )); + + // different public key -> fail + assert!(!ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Bob.public().into(), + )); + + let msg = keccak_256(b"I am not Alice!"); + + // different msg -> fail + assert!( + !ecdsa::Pair::verify_prehashed(&sig.into(), &msg, &Keyring::Alice.public().into(),) + ); + } + + #[test] + fn pair_works() { + let want = crypto::Pair::from_string("//Alice", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Alice.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Bob", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Bob.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Charlie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Charlie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Dave", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Dave.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Eve", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Eve.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Ferdie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Ferdie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//One", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::One.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Two", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Two.pair().to_raw_vec(); + assert_eq!(want, got); + } + + #[test] + fn authority_id_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let bob = Keyring::Bob.public(); + let charlie = Keyring::Charlie.public(); + + let store: BeefyKeystore = Some(store).into(); + + let mut keys = vec![bob, charlie]; + + let id = store.authority_id(keys.as_slice()); + assert!(id.is_none()); + + keys.push(alice.clone()); + + let id = store.authority_id(keys.as_slice()).unwrap(); + assert_eq!(id, alice); + } + + #[test] + fn sign_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + let msg = b"are you involved or commited?"; + + let sig1 = store.sign(&alice, msg).unwrap(); + let sig2 = Keyring::Alice.sign(msg); + + assert_eq!(sig1, sig2); + } + + #[test] + fn sign_error() { + let store = keystore(); + + let _ = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Bob.to_seed())) + .ok() + .unwrap(); + + let store: BeefyKeystore = Some(store).into(); + + let alice = Keyring::Alice.public(); + + let msg = 
b"are you involved or commited?"; + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Signature("ecdsa_sign_prehashed() failed".to_string()); + + assert_eq!(sig, err); + } + + #[test] + fn sign_no_keystore() { + let store: BeefyKeystore = None.into(); + + let alice = Keyring::Alice.public(); + let msg = b"are you involved or commited"; + + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Keystore("no Keystore".to_string()); + assert_eq!(sig, err); + } + + #[test] + fn verify_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + // `msg` and `sig` match + let msg = b"are you involved or commited?"; + let sig = store.sign(&alice, msg).unwrap(); + assert!(BeefyKeystore::verify(&alice, &sig, msg)); + + // `msg and `sig` don't match + let msg = b"you are just involved"; + assert!(!BeefyKeystore::verify(&alice, &sig, msg)); + } + + // Note that we use keys with and without a seed for this test. + #[test] + fn public_keys_works() { + const TEST_TYPE: sp_application_crypto::KeyTypeId = + sp_application_crypto::KeyTypeId(*b"test"); + + let store = keystore(); + + let add_key = |key_type, seed: Option<&str>| { + SyncCryptoStore::ecdsa_generate_new(&*store, key_type, seed).unwrap() + }; + + // test keys + let _ = add_key(TEST_TYPE, Some(Keyring::Alice.to_seed().as_str())); + let _ = add_key(TEST_TYPE, Some(Keyring::Bob.to_seed().as_str())); + + let _ = add_key(TEST_TYPE, None); + let _ = add_key(TEST_TYPE, None); + + // BEEFY keys + let _ = add_key(KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); + let _ = add_key(KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); + + let key1: crypto::Public = add_key(KEY_TYPE, None).into(); + let key2: crypto::Public = add_key(KEY_TYPE, None).into(); + + let store: BeefyKeystore = Some(store).into(); + + let keys = store.public_keys().ok().unwrap(); + + assert!(keys.len() == 4); + assert!(keys.contains(&Keyring::Dave.public())); + assert!(keys.contains(&Keyring::Eve.public())); + assert!(keys.contains(&key1)); + assert!(keys.contains(&key2)); + } +} diff --git a/client/beefy/src/keystore_tests.rs b/client/beefy/src/keystore_tests.rs deleted file mode 100644 index 99e3e42228df2..0000000000000 --- a/client/beefy/src/keystore_tests.rs +++ /dev/null @@ -1,275 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::sync::Arc; - -use sc_keystore::LocalKeystore; -use sp_core::{ecdsa, keccak_256, Pair}; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; - -use beefy_primitives::{crypto, KEY_TYPE}; - -use super::BeefyKeystore; -use crate::error::Error; - -/// Set of test accounts using [`beefy_primitives::crypto`] types. -#[allow(missing_docs)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display, strum::EnumIter)] -pub(crate) enum Keyring { - Alice, - Bob, - Charlie, - Dave, - Eve, - Ferdie, - One, - Two, -} - -impl Keyring { - /// Sign `msg`. - pub fn sign(self, msg: &[u8]) -> crypto::Signature { - let msg = keccak_256(msg); - ecdsa::Pair::from(self).sign_prehashed(&msg).into() - } - - /// Return key pair. - pub fn pair(self) -> crypto::Pair { - ecdsa::Pair::from_string(self.to_seed().as_str(), None).unwrap().into() - } - - /// Return public key. - pub fn public(self) -> crypto::Public { - self.pair().public() - } - - /// Return seed string. - pub fn to_seed(self) -> String { - format!("//{}", self) - } -} - -impl From for crypto::Pair { - fn from(k: Keyring) -> Self { - k.pair() - } -} - -impl From for ecdsa::Pair { - fn from(k: Keyring) -> Self { - k.pair().into() - } -} - -fn keystore() -> SyncCryptoStorePtr { - Arc::new(LocalKeystore::in_memory()) -} - -#[test] -fn verify_should_work() { - let msg = keccak_256(b"I am Alice!"); - let sig = Keyring::Alice.sign(b"I am Alice!"); - - assert!(ecdsa::Pair::verify_prehashed( - &sig.clone().into(), - &msg, - &Keyring::Alice.public().into(), - )); - - // different public key -> fail - assert!(!ecdsa::Pair::verify_prehashed( - &sig.clone().into(), - &msg, - &Keyring::Bob.public().into(), - )); - - let msg = keccak_256(b"I am not Alice!"); - - // different msg -> fail - assert!(!ecdsa::Pair::verify_prehashed(&sig.into(), &msg, &Keyring::Alice.public().into(),)); -} - -#[test] -fn pair_works() { - let want = crypto::Pair::from_string("//Alice", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Alice.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Bob", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Bob.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Charlie", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Charlie.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Dave", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Dave.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Eve", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Eve.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Ferdie", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Ferdie.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//One", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::One.pair().to_raw_vec(); - assert_eq!(want, got); - - let want = crypto::Pair::from_string("//Two", None).expect("Pair failed").to_raw_vec(); - let got = Keyring::Two.pair().to_raw_vec(); - assert_eq!(want, got); -} - -#[test] -fn authority_id_works() { - let store = keystore(); - - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); - - let bob = Keyring::Bob.public(); - let charlie = Keyring::Charlie.public(); - - let store: BeefyKeystore = Some(store).into(); 
- - let mut keys = vec![bob, charlie]; - - let id = store.authority_id(keys.as_slice()); - assert!(id.is_none()); - - keys.push(alice.clone()); - - let id = store.authority_id(keys.as_slice()).unwrap(); - assert_eq!(id, alice); -} - -#[test] -fn sign_works() { - let store = keystore(); - - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); - - let store: BeefyKeystore = Some(store).into(); - - let msg = b"are you involved or commited?"; - - let sig1 = store.sign(&alice, msg).unwrap(); - let sig2 = Keyring::Alice.sign(msg); - - assert_eq!(sig1, sig2); -} - -#[test] -fn sign_error() { - let store = keystore(); - - let _ = SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Bob.to_seed())) - .ok() - .unwrap(); - - let store: BeefyKeystore = Some(store).into(); - - let alice = Keyring::Alice.public(); - - let msg = b"are you involved or commited?"; - let sig = store.sign(&alice, msg).err().unwrap(); - let err = Error::Signature("ecdsa_sign_prehashed() failed".to_string()); - - assert_eq!(sig, err); -} - -#[test] -fn sign_no_keystore() { - let store: BeefyKeystore = None.into(); - - let alice = Keyring::Alice.public(); - let msg = b"are you involved or commited"; - - let sig = store.sign(&alice, msg).err().unwrap(); - let err = Error::Keystore("no Keystore".to_string()); - assert_eq!(sig, err); -} - -#[test] -fn verify_works() { - let store = keystore(); - - let alice: crypto::Public = - SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) - .ok() - .unwrap() - .into(); - - let store: BeefyKeystore = Some(store).into(); - - // `msg` and `sig` match - let msg = b"are you involved or commited?"; - let sig = store.sign(&alice, msg).unwrap(); - assert!(BeefyKeystore::verify(&alice, &sig, msg)); - - // `msg and `sig` don't match - let msg = b"you are just involved"; - assert!(!BeefyKeystore::verify(&alice, &sig, msg)); -} - -// Note that we use keys with and without a seed for this test. 
-#[test] -fn public_keys_works() { - const TEST_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"test"); - - let store = keystore(); - - let add_key = |key_type, seed: Option<&str>| { - SyncCryptoStore::ecdsa_generate_new(&*store, key_type, seed).unwrap() - }; - - // test keys - let _ = add_key(TEST_TYPE, Some(Keyring::Alice.to_seed().as_str())); - let _ = add_key(TEST_TYPE, Some(Keyring::Bob.to_seed().as_str())); - - let _ = add_key(TEST_TYPE, None); - let _ = add_key(TEST_TYPE, None); - - // BEEFY keys - let _ = add_key(KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); - let _ = add_key(KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); - - let key1: crypto::Public = add_key(KEY_TYPE, None).into(); - let key2: crypto::Public = add_key(KEY_TYPE, None).into(); - - let store: BeefyKeystore = Some(store).into(); - - let keys = store.public_keys().ok().unwrap(); - - assert!(keys.len() == 4); - assert!(keys.contains(&Keyring::Dave.public())); - assert!(keys.contains(&Keyring::Eve.public())); - assert!(keys.contains(&key1)); - assert!(keys.contains(&key2)); -} diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 51284c9bd2f6e..e9f5ad2062433 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -123,3 +123,154 @@ where ) } } + +#[cfg(test)] +mod tests { + use sc_network_test::Block; + use sp_core::H256; + use sp_runtime::traits::NumberFor; + + use beefy_primitives::{crypto::Public, ValidatorSet}; + + use super::Rounds; + use crate::keystore::tests::Keyring; + + #[test] + fn new_rounds() { + sp_tracing::try_init_simple(); + + let rounds = Rounds::>::new(ValidatorSet::::empty()); + + assert_eq!(0, rounds.validator_set_id()); + assert!(rounds.validators().is_empty()); + + let validators = ValidatorSet:: { + validators: vec![ + Keyring::Alice.public(), + Keyring::Bob.public(), + Keyring::Charlie.public(), + ], + id: 42, + }; + + let rounds = Rounds::>::new(validators); + + assert_eq!(42, rounds.validator_set_id()); + + assert_eq!( + vec![Keyring::Alice.public(), Keyring::Bob.public(), Keyring::Charlie.public()], + rounds.validators() + ); + } + + #[test] + fn add_vote() { + sp_tracing::try_init_simple(); + + let validators = ValidatorSet:: { + validators: vec![ + Keyring::Alice.public(), + Keyring::Bob.public(), + Keyring::Charlie.public(), + ], + id: Default::default(), + }; + + let mut rounds = Rounds::>::new(validators); + + assert!(rounds.add_vote( + (H256::from_low_u64_le(1), 1), + (Keyring::Alice.public(), Keyring::Alice.sign(b"I am committed")) + )); + + assert!(!rounds.is_done(&(H256::from_low_u64_le(1), 1))); + + // invalid vote + assert!(!rounds.add_vote( + (H256::from_low_u64_le(1), 1), + (Keyring::Dave.public(), Keyring::Dave.sign(b"I am committed")) + )); + + assert!(!rounds.is_done(&(H256::from_low_u64_le(1), 1))); + + assert!(rounds.add_vote( + (H256::from_low_u64_le(1), 1), + (Keyring::Bob.public(), Keyring::Bob.sign(b"I am committed")) + )); + + assert!(!rounds.is_done(&(H256::from_low_u64_le(1), 1))); + + assert!(rounds.add_vote( + (H256::from_low_u64_le(1), 1), + (Keyring::Charlie.public(), Keyring::Charlie.sign(b"I am committed")) + )); + + assert!(rounds.is_done(&(H256::from_low_u64_le(1), 1))); + } + + #[test] + fn drop() { + sp_tracing::try_init_simple(); + + let validators = ValidatorSet:: { + validators: vec![ + Keyring::Alice.public(), + Keyring::Bob.public(), + Keyring::Charlie.public(), + ], + id: Default::default(), + }; + + let mut rounds = Rounds::>::new(validators); + + // round 1 + rounds.add_vote( + 
(H256::from_low_u64_le(1), 1), + (Keyring::Alice.public(), Keyring::Alice.sign(b"I am committed")), + ); + rounds.add_vote( + (H256::from_low_u64_le(1), 1), + (Keyring::Bob.public(), Keyring::Bob.sign(b"I am committed")), + ); + + // round 2 + rounds.add_vote( + (H256::from_low_u64_le(2), 2), + (Keyring::Alice.public(), Keyring::Alice.sign(b"I am again committed")), + ); + rounds.add_vote( + (H256::from_low_u64_le(2), 2), + (Keyring::Bob.public(), Keyring::Bob.sign(b"I am again committed")), + ); + + // round 3 + rounds.add_vote( + (H256::from_low_u64_le(3), 3), + (Keyring::Alice.public(), Keyring::Alice.sign(b"I am still committed")), + ); + rounds.add_vote( + (H256::from_low_u64_le(3), 3), + (Keyring::Bob.public(), Keyring::Bob.sign(b"I am still committed")), + ); + + assert_eq!(3, rounds.rounds.len()); + + // drop unknown round + assert!(rounds.drop(&(H256::from_low_u64_le(5), 5)).is_none()); + assert_eq!(3, rounds.rounds.len()); + + // drop round 2 + let signatures = rounds.drop(&(H256::from_low_u64_le(2), 2)).unwrap(); + + assert_eq!(2, rounds.rounds.len()); + + assert_eq!( + signatures, + vec![ + Some(Keyring::Alice.sign(b"I am again committed")), + Some(Keyring::Bob.sign(b"I am again committed")), + None + ] + ); + } +} From 4f3c3a5d9b6587482f5a57e83c44fcfb79d4d844 Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Fri, 26 Nov 2021 05:38:21 +0100 Subject: [PATCH 148/162] remove base weight annotations (#10373) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * remove base weight annotations * Update frame/scheduler/src/lib.rs Co-authored-by: Shawn Tabrizi Co-authored-by: Damilare Co-authored-by: Bastian Köcher Co-authored-by: Shawn Tabrizi --- frame/balances/src/lib.rs | 17 -------------- frame/scheduler/src/lib.rs | 46 -------------------------------------- frame/system/src/lib.rs | 29 ------------------------ 3 files changed, 92 deletions(-) diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index b7c64da460768..4471ed91a9afc 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -271,8 +271,6 @@ pub mod pallet { /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional check /// that the transfer will not kill the origin account. /// --------------------------------- - /// - Base Weight: 73.64 µs, worst case scenario (account created, account removed) - /// - DB Weight: 1 Read and 1 Write to destination account /// - Origin account is already in memory, so no DB operations for them. /// # #[pallet::weight(T::WeightInfo::transfer())] @@ -300,16 +298,6 @@ pub mod pallet { /// it will reset the account nonce (`frame_system::AccountNonce`). /// /// The dispatch origin for this call is `root`. - /// - /// # - /// - Independent of the arguments. - /// - Contains a limited number of reads and writes. - /// --------------------- - /// - Base Weight: - /// - Creating: 27.56 µs - /// - Killing: 35.11 µs - /// - DB Weight: 1 Read, 1 Write to `who` - /// # #[pallet::weight( T::WeightInfo::set_balance_creating() // Creates a new account. .max(T::WeightInfo::set_balance_killing()) // Kills an existing account. @@ -381,11 +369,6 @@ pub mod pallet { /// 99% of the time you want [`transfer`] instead. /// /// [`transfer`]: struct.Pallet.html#method.transfer - /// # - /// - Cheaper than transfer because account cannot be killed. 
- /// - Base Weight: 51.4 µs - /// - DB Weight: 1 Read and 1 Write to dest (sender is in overlay already) - /// # #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index d59c42cc850dd..d25fc3b376e83 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -243,16 +243,6 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { /// Execute the scheduled calls - /// - /// # - /// - S = Number of already scheduled calls - /// - N = Named scheduled calls - /// - P = Periodic Calls - /// - Base Weight: 9.243 + 23.45 * S µs - /// - DB Weight: - /// - Read: Agenda + Lookup * N + Agenda(Future) * P - /// - Write: Agenda + Lookup * N + Agenda(future) * P - /// # fn on_initialize(now: T::BlockNumber) -> Weight { let limit = T::MaximumWeight::get(); let mut queued = Agenda::::take(now) @@ -352,15 +342,6 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Anonymously schedule a task. - /// - /// # - /// - S = Number of already scheduled calls - /// - Base Weight: 22.29 + .126 * S µs - /// - DB Weight: - /// - Read: Agenda - /// - Write: Agenda - /// - Will use base weight of 25 which should be good for up to 30 scheduled calls - /// # #[pallet::weight(::WeightInfo::schedule(T::MaxScheduledPerBlock::get()))] pub fn schedule( origin: OriginFor, @@ -382,15 +363,6 @@ pub mod pallet { } /// Cancel an anonymously scheduled task. - /// - /// # - /// - S = Number of already scheduled calls - /// - Base Weight: 22.15 + 2.869 * S µs - /// - DB Weight: - /// - Read: Agenda - /// - Write: Agenda, Lookup - /// - Will use base weight of 100 which should be good for up to 30 scheduled calls - /// # #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; @@ -400,15 +372,6 @@ pub mod pallet { } /// Schedule a named task. - /// - /// # - /// - S = Number of already scheduled calls - /// - Base Weight: 29.6 + .159 * S µs - /// - DB Weight: - /// - Read: Agenda, Lookup - /// - Write: Agenda, Lookup - /// - Will use base weight of 35 which should be good for more than 30 scheduled calls - /// # #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named( origin: OriginFor, @@ -432,15 +395,6 @@ pub mod pallet { } /// Cancel a named scheduled task. - /// - /// # - /// - S = Number of already scheduled calls - /// - Base Weight: 24.91 + 2.907 * S µs - /// - DB Weight: - /// - Read: Agenda, Lookup - /// - Write: Agenda, Lookup - /// - Will use base weight of 100 which should be good for up to 30 scheduled calls - /// # #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 12361ed859d0e..0b00a7d8f5973 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -351,14 +351,6 @@ pub mod pallet { } /// Set the number of pages in the WebAssembly environment's heap. - /// - /// # - /// - `O(1)` - /// - 1 storage write. 
- /// - Base Weight: 1.405 µs - /// - 1 write to HEAP_PAGES - /// - 1 digest item - /// # #[pallet::weight((T::SystemWeightInfo::set_heap_pages(), DispatchClass::Operational))] pub fn set_heap_pages(origin: OriginFor, pages: u64) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -407,13 +399,6 @@ pub mod pallet { } /// Set some items of storage. - /// - /// # - /// - `O(I)` where `I` length of `items` - /// - `I` storage writes (`O(1)`). - /// - Base Weight: 0.568 * i µs - /// - Writes: Number of items - /// # #[pallet::weight(( T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, @@ -430,13 +415,6 @@ pub mod pallet { } /// Kill some items from storage. - /// - /// # - /// - `O(IK)` where `I` length of `keys` and `K` length of one key - /// - `I` storage deletions. - /// - Base Weight: .378 * i µs - /// - Writes: Number of items - /// # #[pallet::weight(( T::SystemWeightInfo::kill_storage(keys.len() as u32), DispatchClass::Operational, @@ -453,13 +431,6 @@ pub mod pallet { /// /// **NOTE:** We rely on the Root origin to provide us the number of subkeys under /// the prefix we are removing to accurately calculate the weight of this function. - /// - /// # - /// - `O(P)` where `P` amount of keys with prefix `prefix` - /// - `P` storage deletions. - /// - Base Weight: 0.834 * P µs - /// - Writes: Number of subkeys + 1 - /// # #[pallet::weight(( T::SystemWeightInfo::kill_prefix(_subkeys.saturating_add(1)), DispatchClass::Operational, From de6033a2aa4f0191877200516fbc2655399faa4b Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 26 Nov 2021 00:58:00 -0800 Subject: [PATCH 149/162] Update W3F URL links (#10374) --- frame/staking/src/slashing.rs | 2 +- frame/transaction-payment/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 066142d8ecc24..acfb30fb81482 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -47,7 +47,7 @@ //! has multiple misbehaviors. However, accounting for such cases is necessary //! to deter a class of "rage-quit" attacks. //! -//! Based on research at +//! Based on research at use crate::{ BalanceOf, Config, EraIndex, Error, Exposure, NegativeImbalanceOf, Pallet, Perbill, diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index a0d8aeb11706d..f912866f7bb39 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -127,7 +127,7 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction +/// pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); /// Something that can convert the current multiplier to the next one. 
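The `TargetedFeeAdjustment` type whose docs are touched above is the runtime's converter from the previous fee multiplier to the next one: each block it nudges the multiplier toward a targeted block fullness. A back-of-the-envelope sketch of that update rule in plain `f64`, with illustrative `v` and target values rather than the runtime's configured parameters:

```rust
/// next ~= prev * (1 + v*(s - s*) + v^2*(s - s*)^2 / 2), where `s` is the
/// current block saturation and `s*` the targeted saturation.
fn next_multiplier(prev: f64, block_weight: f64, max_block_weight: f64) -> f64 {
    let target_saturation = 0.25; // illustrative: aim for 25% full blocks
    let v = 0.000_04;             // illustrative adjustment speed
    let s = block_weight / max_block_weight;
    let diff = s - target_saturation;
    let next = prev * (1.0 + v * diff + (v * diff).powi(2) / 2.0);
    // The runtime clamps the multiplier to a configured floor; use a stand-in.
    next.max(0.000_01)
}

fn main() {
    // An over-full block pushes the multiplier up; an empty one pulls it down.
    assert!(next_multiplier(1.0, 900.0, 1000.0) > 1.0);
    assert!(next_multiplier(1.0, 0.0, 1000.0) < 1.0);
}
```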
From fcc54a72973d03afe7bf9e3ef2736050b3f33465 Mon Sep 17 00:00:00 2001 From: zjb0807 Date: Tue, 30 Nov 2021 21:15:52 +0800 Subject: [PATCH 150/162] Add runtime-benchmarks for frame-benchmarking (#10324) * update frame/benchmarking/src/baseline.rs * add runtime-benchmarks feature * add runtime-benchmarks for frame-benchmarking * update frame-benchmarking/runtime-benchmarks * trigger GitHub actions --- bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/runtime/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- frame/assets/Cargo.toml | 2 +- frame/babe/Cargo.toml | 2 +- frame/bags-list/Cargo.toml | 2 +- frame/balances/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 1 + frame/benchmarking/src/baseline.rs | 2 ++ frame/bounties/Cargo.toml | 2 +- frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/democracy/Cargo.toml | 2 +- frame/election-provider-multi-phase/Cargo.toml | 2 +- frame/elections-phragmen/Cargo.toml | 2 +- frame/examples/basic/Cargo.toml | 2 +- frame/gilt/Cargo.toml | 2 +- frame/grandpa/Cargo.toml | 2 +- frame/identity/Cargo.toml | 2 +- frame/im-online/Cargo.toml | 2 +- frame/indices/Cargo.toml | 2 +- frame/lottery/Cargo.toml | 2 +- frame/membership/Cargo.toml | 2 +- frame/merkle-mountain-range/Cargo.toml | 2 +- frame/multisig/Cargo.toml | 2 +- frame/proxy/Cargo.toml | 2 +- frame/scheduler/Cargo.toml | 2 +- frame/staking/Cargo.toml | 2 +- frame/timestamp/Cargo.toml | 2 +- frame/tips/Cargo.toml | 2 +- frame/transaction-storage/Cargo.toml | 2 +- frame/treasury/Cargo.toml | 2 +- frame/uniques/Cargo.toml | 2 +- frame/utility/Cargo.toml | 2 +- frame/vesting/Cargo.toml | 2 +- 35 files changed, 36 insertions(+), 33 deletions(-) diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 7ea5628b97c1e..1a239bef3262a 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -36,5 +36,5 @@ std = [ 'frame-benchmarking/std', ] -runtime-benchmarks = ["frame-benchmarking"] +runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] try-runtime = ["frame-support/try-runtime"] diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index bff47f5a7ea39..c821bdac169e6 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -83,7 +83,7 @@ std = [ "sp-version/std", ] runtime-benchmarks = [ - "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system-benchmarking", "frame-system/runtime-benchmarks", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index f9ce4b0fca900..4771202b90a0c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -176,7 +176,7 @@ std = [ "sp-io/std" ] runtime-benchmarks = [ - "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 08cadb527750b..233c88279d35b 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -42,7 +42,7 @@ std = [ "frame-benchmarking/std", ] runtime-benchmarks = [ - "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", ] diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 9ae942486d627..a2224df86798c 100644 --- 
a/frame/babe/Cargo.toml
+++ b/frame/babe/Cargo.toml
@@ -60,5 +60,5 @@ std = [
 	"sp-std/std",
 	"log/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml
index 6d4cf2363c4f7..b7eebdd0df102 100644
--- a/frame/bags-list/Cargo.toml
+++ b/frame/bags-list/Cargo.toml
@@ -56,7 +56,7 @@ std = [
 	"log/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"sp-core",
 	"sp-io",
 	"pallet-balances",
diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml
index 4ea2e9cbe8a31..5565915f6ded6 100644
--- a/frame/balances/Cargo.toml
+++ b/frame/balances/Cargo.toml
@@ -39,5 +39,5 @@ std = [
 	"frame-system/std",
 	"log/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml
index a6bfcf2a552fc..2bf4a53b0f424 100644
--- a/frame/benchmarking/Cargo.toml
+++ b/frame/benchmarking/Cargo.toml
@@ -44,3 +44,4 @@ std = [
 	"linregress",
 	"log/std",
 ]
+runtime-benchmarks = []
diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs
index a2ffca60c5cf1..2b924a692129a 100644
--- a/frame/benchmarking/src/baseline.rs
+++ b/frame/benchmarking/src/baseline.rs
@@ -18,6 +18,8 @@
 //! A set of benchmarks which can establish a global baseline for all other
 //! benchmarking.
 
+#![cfg(feature = "runtime-benchmarks")]
+
 use crate::benchmarks;
 use codec::Encode;
 use frame_system::Pallet as System;
diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml
index cce6ed69a25ba..6c9cef417e99f 100644
--- a/frame/bounties/Cargo.toml
+++ b/frame/bounties/Cargo.toml
@@ -45,7 +45,7 @@ std = [
 	"log/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml
index 6d4567a7851e2..d4dc8f9a2d8d0 100644
--- a/frame/collective/Cargo.toml
+++ b/frame/collective/Cargo.toml
@@ -41,7 +41,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml
index 6639d939e1796..cc21f19b63527 100644
--- a/frame/contracts/Cargo.toml
+++ b/frame/contracts/Cargo.toml
@@ -79,7 +79,7 @@ std = [
 	"libsecp256k1/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"libsecp256k1",
 	"rand",
 	"rand_pcg",
diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml
index 1973677531e9a..43b35b9f7a1f6 100644
--- a/frame/democracy/Cargo.toml
+++ b/frame/democracy/Cargo.toml
@@ -44,7 +44,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml
index b37054a7bbddf..c041d6c684c7f 100644
--- a/frame/election-provider-multi-phase/Cargo.toml
+++ b/frame/election-provider-multi-phase/Cargo.toml
@@ -72,7 +72,7 @@ std = [
 	"log/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-election-provider-support/runtime-benchmarks",
 	"rand",
 	"strum",
diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml
index 6c87be7b6d589..921a7057bcda9 100644
--- a/frame/elections-phragmen/Cargo.toml
+++ b/frame/elections-phragmen/Cargo.toml
@@ -47,7 +47,7 @@ std = [
 	"log/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/examples/basic/Cargo.toml b/frame/examples/basic/Cargo.toml
index a4e8ffe3261cd..8ac375cb36037 100644
--- a/frame/examples/basic/Cargo.toml
+++ b/frame/examples/basic/Cargo.toml
@@ -42,5 +42,5 @@ std = [
 	"sp-runtime/std",
 	"sp-std/std"
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml
index 6b2eae1156a89..e9ee8ba14abad 100644
--- a/frame/gilt/Cargo.toml
+++ b/frame/gilt/Cargo.toml
@@ -40,7 +40,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml
index af125d64ea218..ba2d98fc354e8 100644
--- a/frame/grandpa/Cargo.toml
+++ b/frame/grandpa/Cargo.toml
@@ -60,5 +60,5 @@ std = [
 	"pallet-session/std",
 	"log/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml
index 9a370674e5876..b791256d9452b 100644
--- a/frame/identity/Cargo.toml
+++ b/frame/identity/Cargo.toml
@@ -40,7 +40,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml
index f4cf5a9077c9b..2fa0b5ebc12aa 100644
--- a/frame/im-online/Cargo.toml
+++ b/frame/im-online/Cargo.toml
@@ -47,5 +47,5 @@ std = [
 	"frame-system/std",
 	"log/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml
index f1913d4138be0..7f27ac23c6456 100644
--- a/frame/indices/Cargo.toml
+++ b/frame/indices/Cargo.toml
@@ -42,7 +42,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml
index d4ee5b8008f19..e65bb01660b58 100644
--- a/frame/lottery/Cargo.toml
+++ b/frame/lottery/Cargo.toml
@@ -41,7 +41,7 @@ std = [
 	"frame-system/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 ]
diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml
index a3747ca0576f5..9eafaedee7db1 100644
--- a/frame/membership/Cargo.toml
+++ b/frame/membership/Cargo.toml
@@ -41,7 +41,7 @@ std = [
 	"frame-benchmarking/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml
index 2ff8b16fa4bde..ee9080b19d611 100644
--- a/frame/merkle-mountain-range/Cargo.toml
+++ b/frame/merkle-mountain-range/Cargo.toml
@@ -46,5 +46,5 @@ std = [
 	"frame-system/std",
 	"pallet-mmr-primitives/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml
index 2b0d8f5cee792..f409e0c6c8d06 100644
--- a/frame/multisig/Cargo.toml
+++ b/frame/multisig/Cargo.toml
@@ -39,7 +39,7 @@ std = [
 	"sp-std/std"
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml
index 030209b83cc6c..be288ecc0d759 100644
--- a/frame/proxy/Cargo.toml
+++ b/frame/proxy/Cargo.toml
@@ -40,7 +40,7 @@ std = [
 	"sp-io/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml
index fab2bc2c635ca..01cf95fa7fc7c 100644
--- a/frame/scheduler/Cargo.toml
+++ b/frame/scheduler/Cargo.toml
@@ -39,7 +39,7 @@ std = [
 	"log/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml
index d9461ab454f39..81f5b181850c1 100644
--- a/frame/staking/Cargo.toml
+++ b/frame/staking/Cargo.toml
@@ -68,7 +68,7 @@ std = [
 	"frame-election-provider-support/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-election-provider-support/runtime-benchmarks",
 	"rand_chacha",
 ]
diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml
index 2f07b2a0975a5..a942b58e09509 100644
--- a/frame/timestamp/Cargo.toml
+++ b/frame/timestamp/Cargo.toml
@@ -45,5 +45,5 @@ std = [
 	"sp-timestamp/std",
 	"log/std",
 ]
-runtime-benchmarks = ["frame-benchmarking", "sp-io"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "sp-io"]
 try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml
index 805f1663d1ae3..49b19756e43cc 100644
--- a/frame/tips/Cargo.toml
+++ b/frame/tips/Cargo.toml
@@ -50,7 +50,7 @@ std = [
 	"pallet-treasury/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml
index a8c6e60af2f82..4b4ee7de85098 100644
--- a/frame/transaction-storage/Cargo.toml
+++ b/frame/transaction-storage/Cargo.toml
@@ -33,7 +33,7 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-featu
 
 [features]
 default = ["std"]
-runtime-benchmarks = ["frame-benchmarking", "hex-literal"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "hex-literal"]
 std = [
 	"serde",
 	"codec/std",
diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml
index d375e22df949b..d44699825a6f9 100644
--- a/frame/treasury/Cargo.toml
+++ b/frame/treasury/Cargo.toml
@@ -47,7 +47,7 @@ std = [
 	"pallet-balances/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml
index 4b6d0485567c2..9d7e8f7184d16 100644
--- a/frame/uniques/Cargo.toml
+++ b/frame/uniques/Cargo.toml
@@ -39,7 +39,7 @@ std = [
 	"frame-benchmarking/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"sp-runtime/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml
index c55b20df27855..8407bf53e73d2 100644
--- a/frame/utility/Cargo.toml
+++ b/frame/utility/Cargo.toml
@@ -40,7 +40,7 @@ std = [
 	"sp-std/std",
 ]
 runtime-benchmarks = [
-	"frame-benchmarking",
+	"frame-benchmarking/runtime-benchmarks",
 	"frame-support/runtime-benchmarks",
 	"frame-system/runtime-benchmarks",
 ]
diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml
index 035124f7d0de3..efec3be330dcc 100644
--- a/frame/vesting/Cargo.toml
+++ b/frame/vesting/Cargo.toml
@@ -39,5 +39,5 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 ]
-runtime-benchmarks = ["frame-benchmarking"]
+runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"]
 try-runtime = ["frame-support/try-runtime"]

From afb74de23dfe2994e7ce38c0870efb9734e966f7 Mon Sep 17 00:00:00 2001
From: Gav Wood
Date: Thu, 6 Jan 2022 11:54:07 +0100
Subject: [PATCH 151/162] Assorted refactorings

---
 frame/democracy/src/lib.rs  | 5 ++++-
 frame/democracy/src/vote.rs | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs
index ec706ca8f48aa..46613ed1ec5ea 100644
--- a/frame/democracy/src/lib.rs
+++ b/frame/democracy/src/lib.rs
@@ -1513,9 +1513,12 @@ impl<T: Config> Pallet<T> {
 		};
 		sp_std::mem::swap(&mut old, voting);
 		match old {
-			Voting::Delegating { balance, target, conviction, delegations, prior, .. } => {
+			Voting::Delegating { balance, target, conviction, delegations, mut prior, .. } => {
 				// remove any delegation votes to our current target.
 				Self::reduce_upstream_delegation(&target, conviction.votes(balance));
+				let now = frame_system::Pallet::<T>::block_number();
+				let lock_periods = conviction.lock_periods().into();
+				prior.accumulate(now + T::VoteLockingPeriod::get() * lock_periods, balance);
 				voting.set_common(delegations, prior);
 			},
 			Voting::Direct { votes, delegations, prior } => {
diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs
index da74f7bd2fb64..7a8b3b55a33bd 100644
--- a/frame/democracy/src/vote.rs
+++ b/frame/democracy/src/vote.rs
@@ -183,7 +183,7 @@ impl
 		match self {
 			Voting::Direct { votes, prior, .. } =>
 				votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)),
-			Voting::Delegating { balance, .. } => *balance,
+			Voting::Delegating { balance, prior, .. } => *balance.max(&prior.locked()),
 		}
 	}

From 522678b9441bc6ff6f2ee393b4e98be86cac9561 Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Thu, 13 Oct 2022 12:21:28 +0300
Subject: [PATCH 152/162] Post merge fixes

---
 Cargo.lock                     | 1 +
 bin/node/cli/Cargo.toml        | 1 +
 bin/node/cli/src/chain_spec.rs | 2 --
 3 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a5661a5ceefcc..68bd1036a07fb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4586,6 +4586,7 @@ dependencies = [
  "serde",
  "serde_json 1.0.68",
  "soketto 0.4.2",
+ "sp-api",
 "sp-authority-discovery",
 "sp-authorship",
 "sp-blockchain",
diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml
index 0556da70706c4..df9d0b50acbbc 100644
--- a/bin/node/cli/Cargo.toml
+++ b/bin/node/cli/Cargo.toml
@@ -46,6 +46,7 @@ structopt = { version = "0.3.25", optional = true }
 sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" }
 sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" }
 grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" }
+sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" }
 sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" }
 sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" }
 sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" }
diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs
index 84a53cbc8b94f..bb7e168e375b5 100644
--- a/bin/node/cli/src/chain_spec.rs
+++ b/bin/node/cli/src/chain_spec.rs
@@ -358,8 +358,6 @@ pub fn testnet_genesis(
 			max_members: 999,
 		},
 		vesting: Default::default(),
-		gilt: Default::default(),
-		transaction_storage: Default::default(),
 		scheduler: Default::default(),
 		transaction_payment: Default::default(),
 	}

From c1bcfe9c7688603db9b32a7c53cc2abaeda3c01d Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Thu, 13 Oct 2022 13:00:09 +0300
Subject: [PATCH 153/162] Update rustc version

---
 Dockerfile       | 2 +-
 Dockerfile.tests | 4 ++--
 scripts/init.sh  | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index f51abdb9b428a..183c0d278d9ba 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,7 +11,7 @@ RUN apt-get update && \
 RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
 	export PATH=$PATH:$HOME/.cargo/bin && \
 	scripts/init.sh && \
-	RUSTC_BOOTSTRAP=1 cargo +stable-2021-06-17 build --$PROFILE
+	RUSTC_BOOTSTRAP=1 cargo +stable-2021-11-01 build --$PROFILE
 
 # ===== SECOND STAGE ======
 FROM phusion/baseimage:0.11
diff --git a/Dockerfile.tests b/Dockerfile.tests
index 8183d00e9b025..91b4465a066b8 100644
--- a/Dockerfile.tests
+++ b/Dockerfile.tests
@@ -21,5 +21,5 @@ RUN apt-get update && \
 RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
 	export PATH=$PATH:$HOME/.cargo/bin && \
 	scripts/init.sh && \
-	TRYBUILD=overwrite RUSTC_BOOTSTRAP=1 cargo +stable-2021-06-17 test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml && \
-	WASM_BUILD_NO_COLOR=1 SUBSTRATE_TEST_TIMEOUT=1 cargo +stable-2021-06-17 test -p substrate-test-utils --release --verbose --locked -- --ignored timeout
+	TRYBUILD=overwrite RUSTC_BOOTSTRAP=1 cargo +stable-2021-11-01 test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml && \
+	WASM_BUILD_NO_COLOR=1 SUBSTRATE_TEST_TIMEOUT=1 cargo +stable-2021-11-01 test -p substrate-test-utils --release --verbose --locked -- --ignored timeout
diff --git a/scripts/init.sh b/scripts/init.sh
index 9bdcdcafc1a4b..48e6323e5a2a5 100755
--- a/scripts/init.sh
+++ b/scripts/init.sh
@@ -4,6 +4,6 @@ set -e
 
 echo "*** Initializing WASM build environment"
 
-rustup install stable-2021-06-17
+rustup install stable-2021-11-01
 
-rustup target add wasm32-unknown-unknown --toolchain stable-2021-06-17
+rustup target add wasm32-unknown-unknown --toolchain stable-2021-11-01

From bb7edaa93f4589a67a20fafba9c17d753e7bb5db Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Thu, 13 Oct 2022 14:28:26 +0300
Subject: [PATCH 154/162] Post merge fixes

---
 Cargo.toml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index ee230a4e1c9cb..c02c986d43316 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -80,9 +80,7 @@ members = [
 	"frame/elections",
 	"frame/election-provider-multi-phase",
 	"frame/election-provider-support",
-	"frame/example/basic",
 	"frame/ddc-metrics-offchain-worker",
-	"frame/example-parallel",
 	"frame/executive",
 	"frame/gilt",
 	"frame/grandpa",

From 5dd9424c57959e18657a57b255dabbfb2d43befc Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Thu, 13 Oct 2022 16:03:03 +0300
Subject: [PATCH 155/162] Fix issue with dependencies

---
 Cargo.lock | 144 +++++++++++++++++------------------------------------
 1 file changed, 46 insertions(+), 98 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 68bd1036a07fb..8c2bea8468ea2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -847,7 +847,7 @@ dependencies = [
  "cargo-platform",
  "semver 1.0.4",
  "serde",
- "serde_json 1.0.68",
+ "serde_json 1.0.71",
 ]
 
 [[package]]
@@ -1262,7 +1262,7 @@ dependencies = [
  "serde",
  "serde_cbor",
  "serde_derive",
- "serde_json 1.0.68",
+ "serde_json 1.0.71",
  "tinytemplate",
  "tokio",
  "walkdir",
 ]
@@ -1980,7 +1980,7 @@ dependencies = [
  "linregress",
  "log 0.4.14",
  "parity-scale-codec",
- "paste 1.0.6",
+ "paste",
 "scale-info",
 "sp-api",
 "sp-io",
@@ -2077,8 +2077,8 @@ dependencies =
[ "parity-scale-codec", "scale-info", "serde", - "serde_json", + "serde_json 1.0.71", "smallvec 1.7.0", "sp-core", "sp-io", @@ -5450,7 +5449,7 @@ dependencies = [ "pallet-contracts-rpc-runtime-api", "parity-scale-codec", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-blockchain", "sp-core", @@ -5612,38 +5611,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-example" -version = "4.0.0-dev" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log 0.4.14", - "pallet-balances", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-example-parallel" -version = "3.0.0-dev" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-tasks", -] - [[package]] name = "pallet-gilt" version = "4.0.0-dev" @@ -5823,7 +5790,7 @@ dependencies = [ "pallet-mmr-primitives", "parity-scale-codec", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-blockchain", "sp-core", @@ -6170,7 +6137,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "smallvec 1.7.0", "sp-core", "sp-io", @@ -6477,26 +6444,7 @@ dependencies = [ name = "paste" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" - -[[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = [ - "proc-macro-hack", -] +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pbkdf2" @@ -7370,7 +7318,7 @@ dependencies = [ "pallet-elections-phragmen", "parity-scale-codec", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-core", "sp-io", "sp-runtime", @@ -7672,7 +7620,7 @@ dependencies = [ "sc-network", "sc-telemetry", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-core", "sp-runtime", ] @@ -7710,7 +7658,7 @@ dependencies = [ "sc-tracing", "sc-utils", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-blockchain", "sp-core", "sp-keyring", @@ -7910,7 +7858,7 @@ dependencies = [ "sc-keystore", "sc-rpc-api", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-application-crypto", "sp-blockchain", @@ -8043,7 +7991,7 @@ dependencies = [ "log 0.4.14", "parity-scale-codec", "parking_lot 0.11.1", - "paste 1.0.6", + "paste", "regex", "sc-executor-common", "sc-executor-wasmi", @@ -8150,7 +8098,7 @@ dependencies = [ "sc-network-test", "sc-telemetry", "sc-utils", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -8186,7 +8134,7 @@ dependencies = [ "sc-finality-grandpa", "sc-rpc", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-blockchain", "sp-core", "sp-finality-grandpa", @@ -8219,7 +8167,7 @@ dependencies = [ "derive_more", "hex", "parking_lot 0.11.1", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-application-crypto", "sp-core", "sp-keystore", @@ -8263,7 +8211,7 @@ dependencies = [ "sc-peerset", "sc-utils", 
"serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "smallvec 1.7.0", "sp-arithmetic", "sp-blockchain", @@ -8373,7 +8321,7 @@ dependencies = [ "log 0.4.14", "rand 0.7.3", "sc-utils", - "serde_json 1.0.68", + "serde_json 1.0.71", "wasm-timer", ] @@ -8407,7 +8355,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-blockchain", "sp-consensus", @@ -8437,7 +8385,7 @@ dependencies = [ "sc-chain-spec", "sc-transaction-pool-api", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-core", "sp-rpc", "sp-runtime", @@ -8457,7 +8405,7 @@ dependencies = [ "jsonrpc-pubsub", "jsonrpc-ws-server", "log 0.4.14", - "serde_json 1.0.68", + "serde_json 1.0.71", "substrate-prometheus-endpoint", "tokio", ] @@ -8466,7 +8414,7 @@ dependencies = [ name = "sc-runtime-test" version = "2.0.0" dependencies = [ - "paste 1.0.6", + "paste", "sp-core", "sp-io", "sp-runtime", @@ -8513,7 +8461,7 @@ dependencies = [ "sc-transaction-pool-api", "sc-utils", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-application-crypto", "sp-block-builder", @@ -8606,7 +8554,7 @@ dependencies = [ "sc-finality-grandpa", "sc-rpc-api", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-blockchain", "sp-runtime", "thiserror", @@ -8624,7 +8572,7 @@ dependencies = [ "pin-project 1.0.8", "rand 0.7.3", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "thiserror", "wasm-timer", ] @@ -9063,7 +9011,7 @@ dependencies = [ "approx", "num-complex", "num-traits", - "paste 1.0.6", + "paste", ] [[package]] @@ -9437,7 +9385,7 @@ dependencies = [ "schnorrkel", "secrecy", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sha2 0.9.8", "sp-core-hashing", "sp-core-hashing-proc-macro", @@ -9663,7 +9611,7 @@ version = "4.0.0-dev" dependencies = [ "rustc-hash", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-core", ] @@ -9677,11 +9625,11 @@ dependencies = [ "log 0.4.14", "parity-scale-codec", "parity-util-mem", - "paste 1.0.6", + "paste", "rand 0.7.3", "scale-info", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -9784,7 +9732,7 @@ name = "sp-serializer" version = "4.0.0-dev" dependencies = [ "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", ] [[package]] @@ -9996,7 +9944,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "unicode-xid", ] @@ -10190,7 +10138,7 @@ dependencies = [ "sc-offchain", "sc-service", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sp-blockchain", "sp-consensus", "sp-core", @@ -10553,7 +10501,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", ] [[package]] @@ -10826,7 +10774,7 @@ dependencies = [ "parking_lot 0.11.1", "regex", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "sharded-slab", "smallvec 1.7.0", "thread_local", @@ -10978,7 +10926,7 @@ dependencies = [ "glob", "lazy_static", "serde", - "serde_json 1.0.68", + "serde_json 1.0.71", "termcolor", "toml", ] @@ -11585,7 +11533,7 @@ dependencies = [ "libc", "log 0.4.14", "object 0.27.1", - "paste 1.0.6", + "paste", "psm", "rayon", "region", From d12cba15e75dcb0de6f303b24445f13fea6f49bc Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Fri, 14 Oct 2022 12:18:42 +0300 Subject: [PATCH 156/162] Revert "Moving `pallet-asset-tx-payment` from cumulus 
to substrate (#10127)" This reverts commit 4581dd99ed59f65ede46a34c81133177d5824a68. --- Cargo.lock | 27 +- Cargo.toml | 1 - bin/node/cli/Cargo.toml | 1 - bin/node/cli/src/service.rs | 6 +- bin/node/runtime/Cargo.toml | 1 - bin/node/runtime/src/impls.rs | 19 +- bin/node/runtime/src/lib.rs | 15 +- bin/node/test-runner-example/Cargo.toml | 1 - bin/node/test-runner-example/src/lib.rs | 2 +- bin/node/testing/Cargo.toml | 1 - bin/node/testing/src/keyring.rs | 2 +- .../asset-tx-payment/Cargo.toml | 55 -- .../asset-tx-payment/README.md | 21 - .../asset-tx-payment/src/lib.rs | 288 ------- .../asset-tx-payment/src/payment.rs | 168 ---- .../asset-tx-payment/src/tests.rs | 748 ------------------ 16 files changed, 11 insertions(+), 1345 deletions(-) delete mode 100644 frame/transaction-payment/asset-tx-payment/Cargo.toml delete mode 100644 frame/transaction-payment/asset-tx-payment/README.md delete mode 100644 frame/transaction-payment/asset-tx-payment/src/lib.rs delete mode 100644 frame/transaction-payment/asset-tx-payment/src/payment.rs delete mode 100644 frame/transaction-payment/asset-tx-payment/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 8c2bea8468ea2..45cefc99ea284 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4550,7 +4550,6 @@ dependencies = [ "node-primitives", "node-rpc", "node-runtime", - "pallet-asset-tx-payment", "pallet-balances", "pallet-im-online", "pallet-timestamp", @@ -4715,7 +4714,7 @@ dependencies = [ "hex-literal", "log 0.4.14", "node-primitives", - "pallet-asset-tx-payment", + "pallet-assets", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -4804,7 +4803,6 @@ dependencies = [ "node-executor", "node-primitives", "node-runtime", - "pallet-asset-tx-payment", "pallet-transaction-payment", "parity-scale-codec", "sc-block-builder", @@ -5071,28 +5069,6 @@ dependencies = [ "stable_deref_trait", ] -[[package]] -name = "pallet-asset-tx-payment" -version = "4.0.0-dev" -dependencies = [ - "frame-support", - "frame-system", - "pallet-assets", - "pallet-authorship", - "pallet-balances", - "pallet-transaction-payment", - "parity-scale-codec", - "scale-info", - "serde", - "serde_json 1.0.71", - "smallvec 1.7.0", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-storage", -] - [[package]] name = "pallet-assets" version = "4.0.0-dev" @@ -10394,7 +10370,6 @@ dependencies = [ "node-cli", "node-primitives", "node-runtime", - "pallet-asset-tx-payment", "pallet-transaction-payment", "sc-consensus", "sc-consensus-babe", diff --git a/Cargo.toml b/Cargo.toml index c02c986d43316..993483f5f6ac6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -119,7 +119,6 @@ members = [ "frame/system/rpc/runtime-api", "frame/timestamp", "frame/transaction-payment", - "frame/transaction-payment/asset-tx-payment", "frame/transaction-payment/rpc", "frame/transaction-payment/rpc/runtime-api", "frame/transaction-storage", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index df9d0b50acbbc..4a75be177f176 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -81,7 +81,6 @@ sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../frame/system/rpc/runtime-api" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } -pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment/" } pallet-im-online = { 
version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } # node-specific dependencies diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index fec91a9b67cc4..e73b69153d1df 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -92,7 +92,7 @@ pub fn create_extrinsic( )), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); let raw_payload = node_runtime::SignedPayload::from_raw( @@ -725,7 +725,7 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = pallet_asset_tx_payment::ChargeAssetTxPayment::from(0, None); + let payment = pallet_transaction_payment::ChargeTransactionPayment::from(0); let extra = ( check_spec_version, check_tx_version, @@ -733,7 +733,7 @@ mod tests { check_era, check_nonce, check_weight, - tx_payment, + payment, ); let raw_payload = SignedPayload::from_raw( function, diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 1f858f3776f16..16c98272d55a6 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -91,7 +91,6 @@ pallet-treasury = { version = "4.0.0-dev", default-features = false, path = "../ pallet-utility = { version = "4.0.0-dev", default-features = false, path = "../../../frame/utility" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-asset-tx-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment/asset-tx-payment/" } pallet-transaction-storage = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-storage" } pallet-uniques = { version = "4.0.0-dev", default-features = false, path = "../../../frame/uniques" } pallet-vesting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/vesting" } diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 6c74408082255..d976da4006758 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -17,12 +17,8 @@ //! Some configurable implementations as associated type for the substrate runtime. -use crate::{AccountId, Assets, Authorship, Balances, NegativeImbalance, Runtime}; -use frame_support::traits::{ - fungibles::{Balanced, CreditOf}, - Currency, OnUnbalanced, -}; -use pallet_asset_tx_payment::HandleCredit; +use crate::{Authorship, Balances, NegativeImbalance}; +use frame_support::traits::{Currency, OnUnbalanced}; pub struct Author; impl OnUnbalanced for Author { @@ -31,17 +27,6 @@ impl OnUnbalanced for Author { } } -/// A `HandleCredit` implementation that naively transfers the fees to the block author. -/// Will drop and burn the assets in case the transfer fails. -pub struct CreditToBlockAuthor; -impl HandleCredit for CreditToBlockAuthor { - fn handle_credit(credit: CreditOf) { - let author = pallet_authorship::Pallet::::author(); - // Drop the result which will trigger the `OnDrop` of the imbalance in case of error. 
- let _ = Assets::resolve(&author, credit); - } -} - #[cfg(test)] mod multiplier_tests { use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 56a83bb17de9c..584b6aa8bc1ee 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -93,7 +93,7 @@ pub use sp_runtime::BuildStorage; /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; -use impls::{Author, CreditToBlockAuthor}; +use impls::Author; /// Constant values used within the runtime. pub mod constants; @@ -437,14 +437,6 @@ impl pallet_transaction_payment::Config for Runtime { TargetedFeeAdjustment; } -impl pallet_asset_tx_payment::Config for Runtime { - type Fungibles = Assets; - type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, - CreditToBlockAuthor, - >; -} - parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } @@ -981,7 +973,7 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -1220,7 +1212,6 @@ construct_runtime!( Indices: pallet_indices, Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, - AssetTxPayment: pallet_asset_tx_payment, ElectionProviderMultiPhase: pallet_election_provider_multi_phase, Staking: pallet_staking, Session: pallet_session, @@ -1278,7 +1269,7 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_tx_payment::ChargeAssetTxPayment, + pallet_transaction_payment::ChargeTransactionPayment, ); /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml index 831a687254409..b664cdb8e50e2 100644 --- a/bin/node/test-runner-example/Cargo.toml +++ b/bin/node/test-runner-example/Cargo.toml @@ -11,7 +11,6 @@ test-runner = { path = "../../../test-utils/test-runner" } frame-system = { path = "../../../frame/system" } frame-benchmarking = { path = "../../../frame/benchmarking" } pallet-transaction-payment = { path = "../../../frame/transaction-payment" } -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment/" } node-runtime = { path = "../runtime" } node-primitives = { path = "../primitives" } diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs index 68c14b73bf562..0de7f5a4e2b70 100644 --- a/bin/node/test-runner-example/src/lib.rs +++ b/bin/node/test-runner-example/src/lib.rs @@ -77,7 +77,7 @@ impl ChainInfo for NodeTemplateChainInfo { frame_system::Pallet::::account_nonce(from), ), frame_system::CheckWeight::::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::::from(0, None), + pallet_transaction_payment::ChargeTransactionPayment::::from(0), ) } } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 0e5ed07ac2952..1854029b0709e 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -38,7 +38,6 @@ sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/c frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } -pallet-asset-tx-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/asset-tx-payment/" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/timestamp" } sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-builder" } diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 1040e90c4d5d4..4e2d88b4bba33 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -76,7 +76,7 @@ pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_transaction_payment::ChargeTransactionPayment::from(extra_fee), ) } diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml deleted file mode 100644 index e4f3b128cfce9..0000000000000 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "pallet-asset-tx-payment" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2018" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "pallet to manage transaction payments in assets" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -# Substrate dependencies -sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } -sp-io = { version = 
"4.0.0-dev", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } - -frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = ".." } - -# Other dependencies -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.126", optional = true } - -[dev-dependencies] -smallvec = "1.7.0" -serde_json = "1.0.71" - -sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" } - -pallet-assets = { version = "4.0.0-dev", path = "../../assets" } -pallet-authorship = { version = "4.0.0-dev", path = "../../authorship" } -pallet-balances = { version = "4.0.0-dev", path = "../../balances" } - - -[features] -default = ["std"] -std = [ - "serde", - "codec/std", - "sp-std/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-io/std", - "sp-core/std", - "pallet-transaction-payment/std", -] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/asset-tx-payment/README.md b/frame/transaction-payment/asset-tx-payment/README.md deleted file mode 100644 index fc860347d85fa..0000000000000 --- a/frame/transaction-payment/asset-tx-payment/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# pallet-asset-tx-payment - -## Asset Transaction Payment Pallet - -This pallet allows runtimes that include it to pay for transactions in assets other than the -native token of the chain. - -### Overview -It does this by extending transactions to include an optional `AssetId` that specifies the asset -to be used for payment (defaulting to the native token on `None`). It expects an -[`OnChargeAssetTransaction`] implementation analogously to [`pallet-transaction-payment`]. The -included [`FungiblesAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the fee -amount by converting the fee calculated by [`pallet-transaction-payment`] into the desired -asset. - -### Integration -This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means -you should include both pallets in your `construct_runtime` macro, but only include this -pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). - -License: Apache-2.0 diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs deleted file mode 100644 index 1f22669857d76..0000000000000 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Asset Transaction Payment Pallet -//! -//! This pallet allows runtimes that include it to pay for transactions in assets other than the -//! main token of the chain. -//! -//! ## Overview - -//! It does this by extending transactions to include an optional `AssetId` that specifies the asset -//! to be used for payment (defaulting to the native token on `None`). It expects an -//! [`OnChargeAssetTransaction`] implementation analogously to [`pallet-transaction-payment`]. The -//! included [`FungiblesAdapter`] (implementing [`OnChargeAssetTransaction`]) determines the fee -//! amount by converting the fee calculated by [`pallet-transaction-payment`] into the desired -//! asset. -//! -//! ## Integration - -//! This pallet wraps FRAME's transaction payment pallet and functions as a replacement. This means -//! you should include both pallets in your `construct_runtime` macro, but only include this -//! pallet's [`SignedExtension`] ([`ChargeAssetTxPayment`]). - -#![cfg_attr(not(feature = "std"), no_std)] - -use sp_std::prelude::*; - -use codec::{Decode, Encode}; -use frame_support::{ - dispatch::DispatchResult, - traits::{ - tokens::{ - fungibles::{Balanced, CreditOf, Inspect}, - WithdrawConsequence, - }, - IsType, - }, - weights::{DispatchInfo, PostDispatchInfo}, - DefaultNoBound, -}; -use pallet_transaction_payment::OnChargeTransaction; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension, Zero}, - transaction_validity::{ - InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, - }, - FixedPointOperand, -}; - -#[cfg(test)] -mod tests; - -mod payment; -pub use payment::*; - -// Type aliases used for interaction with `OnChargeTransaction`. -pub(crate) type OnChargeTransactionOf = - ::OnChargeTransaction; -// Balance type alias. -pub(crate) type BalanceOf = as OnChargeTransaction>::Balance; -// Liquity info type alias. -pub(crate) type LiquidityInfoOf = - as OnChargeTransaction>::LiquidityInfo; - -// Type alias used for interaction with fungibles (assets). -// Balance type alias. -pub(crate) type AssetBalanceOf = - <::Fungibles as Inspect<::AccountId>>::Balance; -/// Asset id type alias. -pub(crate) type AssetIdOf = - <::Fungibles as Inspect<::AccountId>>::AssetId; - -// Type aliases used for interaction with `OnChargeAssetTransaction`. -// Balance type alias. -pub(crate) type ChargeAssetBalanceOf = - <::OnChargeAssetTransaction as OnChargeAssetTransaction>::Balance; -// Asset id type alias. -pub(crate) type ChargeAssetIdOf = - <::OnChargeAssetTransaction as OnChargeAssetTransaction>::AssetId; -// Liquity info type alias. -pub(crate) type ChargeAssetLiquidityOf = - <::OnChargeAssetTransaction as OnChargeAssetTransaction>::LiquidityInfo; - -/// Used to pass the initial payment info from pre- to post-dispatch. -#[derive(Encode, Decode, DefaultNoBound, TypeInfo)] -pub enum InitialPayment { - /// No initial fee was payed. - Nothing, - /// The initial fee was payed in the native currency. - Native(LiquidityInfoOf), - /// The initial fee was payed in an asset. - Asset(CreditOf), -} - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - - #[pallet::config] - pub trait Config: frame_system::Config + pallet_transaction_payment::Config { - /// The fungibles instance used to pay for transactions in assets. 
- type Fungibles: Balanced; - /// The actual transaction charging logic that charges the fees. - type OnChargeAssetTransaction: OnChargeAssetTransaction; - } - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); -} - -/// Require the transactor pay for themselves and maybe include a tip to gain additional priority -/// in the queue. Allows paying via both `Currency` as well as `fungibles::Balanced`. -/// -/// Wraps the transaction logic in [`pallet_transaction_payment`] and extends it with assets. -/// An asset id of `None` falls back to the underlying transaction payment via the native currency. -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct ChargeAssetTxPayment { - #[codec(compact)] - tip: BalanceOf, - asset_id: Option>, -} - -impl ChargeAssetTxPayment -where - T::Call: Dispatchable, - AssetBalanceOf: Send + Sync + FixedPointOperand, - BalanceOf: Send + Sync + FixedPointOperand + IsType>, - ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, -{ - /// Utility constructor. Used only in client/factory code. - pub fn from(tip: BalanceOf, asset_id: Option>) -> Self { - Self { tip, asset_id } - } - - /// Fee withdrawal logic that dispatches to either `OnChargeAssetTransaction` or - /// `OnChargeTransaction`. - fn withdraw_fee( - &self, - who: &T::AccountId, - call: &T::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { - let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); - debug_assert!(self.tip <= fee, "tip should be included in the computed fee"); - if fee.is_zero() { - Ok((fee, InitialPayment::Nothing)) - } else if let Some(asset_id) = self.asset_id { - T::OnChargeAssetTransaction::withdraw_fee( - who, - call, - info, - asset_id, - fee.into(), - self.tip.into(), - ) - .map(|i| (fee, InitialPayment::Asset(i.into()))) - } else { - as OnChargeTransaction>::withdraw_fee( - who, call, info, fee, self.tip, - ) - .map(|i| (fee, InitialPayment::Native(i))) - .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() }) - } - } -} - -impl sp_std::fmt::Debug for ChargeAssetTxPayment { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "ChargeAssetTxPayment<{:?}, {:?}>", self.tip, self.asset_id.encode()) - } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } -} - -impl SignedExtension for ChargeAssetTxPayment -where - T::Call: Dispatchable, - AssetBalanceOf: Send + Sync + FixedPointOperand, - BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, - ChargeAssetIdOf: Send + Sync, - CreditOf: IsType>, -{ - const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = ( - // tip - BalanceOf, - // who paid the fee - Self::AccountId, - // imbalance resulting from withdrawing the fee - InitialPayment, - ); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { - Ok(()) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - use pallet_transaction_payment::ChargeTransactionPayment; - let (fee, _) = self.withdraw_fee(who, call, info, len)?; - let priority = ChargeTransactionPayment::::get_priority(info, len, self.tip, fee); - Ok(ValidTransaction 
{ priority, ..Default::default() }) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - let (_fee, initial_payment) = self.withdraw_fee(who, call, info, len)?; - Ok((self.tip, who.clone(), initial_payment)) - } - - fn post_dispatch( - pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let (tip, who, initial_payment) = pre; - match initial_payment { - InitialPayment::Native(already_withdrawn) => { - pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch( - (tip, who, already_withdrawn), - info, - post_info, - len, - result, - )?; - }, - InitialPayment::Asset(already_withdrawn) => { - let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( - len as u32, info, post_info, tip, - ); - T::OnChargeAssetTransaction::correct_and_deposit_fee( - &who, - info, - post_info, - actual_fee.into(), - tip.into(), - already_withdrawn.into(), - )?; - }, - InitialPayment::Nothing => { - // `actual_fee` should be zero here for any signed extrinsic. It would be non-zero - // here in case of unsigned extrinsics as they don't pay fees but - // `compute_actual_fee` is not aware of them. In both cases it's fine to just move - // ahead without adjusting the fee, though, so we do nothing. - debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - }, - } - - Ok(()) - } -} diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs deleted file mode 100644 index 09482f96490c7..0000000000000 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -///! Traits and default implementation for paying transaction fees in assets. -use super::*; -use crate::Config; - -use codec::FullCodec; -use frame_support::{ - traits::{ - fungibles::{Balanced, CreditOf, Inspect}, - tokens::BalanceConversion, - }, - unsigned::TransactionValidityError, -}; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{ - AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, One, PostDispatchInfoOf, - }, - transaction_validity::InvalidTransaction, -}; -use sp_std::{fmt::Debug, marker::PhantomData}; - -/// Handle withdrawing, refunding and depositing of transaction fees. -pub trait OnChargeAssetTransaction { - /// The underlying integer type in which fees are calculated. - type Balance: AtLeast32BitUnsigned - + FullCodec - + Copy - + MaybeSerializeDeserialize - + Debug - + Default - + TypeInfo; - /// The type used to identify the assets used for transaction payment. 
- type AssetId: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo; - /// The type used to store the intermediate values between pre- and post-dispatch. - type LiquidityInfo; - - /// Before the transaction is executed the payment of the transaction fees needs to be secured. - /// - /// Note: The `fee` already includes the `tip`. - fn withdraw_fee( - who: &T::AccountId, - call: &T::Call, - dispatch_info: &DispatchInfoOf, - asset_id: Self::AssetId, - fee: Self::Balance, - tip: Self::Balance, - ) -> Result; - - /// After the transaction was executed the actual fee can be calculated. - /// This function should refund any overpaid fees and optionally deposit - /// the corrected amount. - /// - /// Note: The `fee` already includes the `tip`. - fn correct_and_deposit_fee( - who: &T::AccountId, - dispatch_info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - corrected_fee: Self::Balance, - tip: Self::Balance, - already_withdrawn: Self::LiquidityInfo, - ) -> Result<(), TransactionValidityError>; -} - -/// Allows specifying what to do with the withdrawn asset fees. -pub trait HandleCredit> { - /// Implement to determine what to do with the withdrawn asset fees. - /// Default for `CreditOf` from the assets pallet is to burn and - /// decrease total issuance. - fn handle_credit(credit: CreditOf); -} - -/// Default implementation that just drops the credit according to the `OnDrop` in the underlying -/// imbalance type. -impl> HandleCredit for () { - fn handle_credit(_credit: CreditOf) {} -} - -/// Implements the asset transaction for a balance to asset converter (implementing -/// [`BalanceConversion`]) and a credit handler (implementing [`HandleCredit`]). -/// -/// The credit handler is given the complete fee in terms of the asset used for the transaction. -pub struct FungiblesAdapter(PhantomData<(CON, HC)>); - -/// Default implementation for a runtime instantiating this pallet, a balance to asset converter and -/// a credit handler. -impl OnChargeAssetTransaction for FungiblesAdapter -where - T: Config, - CON: BalanceConversion, AssetIdOf, AssetBalanceOf>, - HC: HandleCredit, - AssetIdOf: FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Eq + TypeInfo, -{ - type Balance = BalanceOf; - type AssetId = AssetIdOf; - type LiquidityInfo = CreditOf; - - /// Withdraw the predicted fee from the transaction origin. - /// - /// Note: The `fee` already includes the `tip`. - fn withdraw_fee( - who: &T::AccountId, - _call: &T::Call, - _info: &DispatchInfoOf, - asset_id: Self::AssetId, - fee: Self::Balance, - _tip: Self::Balance, - ) -> Result { - // We don't know the precision of the underlying asset. Because the converted fee could be - // less than one (e.g. 0.5) but gets rounded down by integer division we introduce a minimum - // fee. - let min_converted_fee = if fee.is_zero() { Zero::zero() } else { One::one() }; - let converted_fee = CON::to_asset_balance(fee, asset_id) - .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment))? - .max(min_converted_fee); - let can_withdraw = >::can_withdraw( - asset_id.into(), - who, - converted_fee, - ); - if !matches!(can_withdraw, WithdrawConsequence::Success) { - return Err(InvalidTransaction::Payment.into()) - } - >::withdraw(asset_id.into(), who, converted_fee) - .map_err(|_| TransactionValidityError::from(InvalidTransaction::Payment)) - } - - /// Hand the fee and the tip over to the `[HandleCredit]` implementation. 
- /// Since the predicted fee might have been too high, parts of the fee may be refunded. - /// - /// Note: The `corrected_fee` already includes the `tip`. - fn correct_and_deposit_fee( - who: &T::AccountId, - _dispatch_info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, - corrected_fee: Self::Balance, - _tip: Self::Balance, - paid: Self::LiquidityInfo, - ) -> Result<(), TransactionValidityError> { - let min_converted_fee = if corrected_fee.is_zero() { Zero::zero() } else { One::one() }; - // Convert the corrected fee into the asset used for payment. - let converted_fee = CON::to_asset_balance(corrected_fee, paid.asset().into()) - .map_err(|_| -> TransactionValidityError { InvalidTransaction::Payment.into() })? - .max(min_converted_fee); - // Calculate how much refund we should return. - let (final_fee, refund) = paid.split(converted_fee); - // Refund to the account that paid the fees. If this fails, the account might have dropped - // below the existential balance. In that case we don't refund anything. - let _ = >::resolve(who, refund); - // Handle the final fee, e.g. by transferring to the block author or burning. - HC::handle_credit(final_fee); - Ok(()) - } -} diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs deleted file mode 100644 index bd5dc57239a28..0000000000000 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ /dev/null @@ -1,748 +0,0 @@ -// Copyright (C) 2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::*; -use crate as pallet_asset_tx_payment; - -use frame_support::{ - assert_ok, - pallet_prelude::*, - parameter_types, - traits::{fungibles::Mutate, FindAuthor}, - weights::{ - DispatchClass, DispatchInfo, PostDispatchInfo, Weight, WeightToFeeCoefficient, - WeightToFeeCoefficients, WeightToFeePolynomial, - }, - ConsensusEngineId, -}; -use frame_system as system; -use frame_system::EnsureRoot; -use pallet_balances::Call as BalancesCall; -use pallet_transaction_payment::CurrencyAdapter; -use smallvec::smallvec; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, ConvertInto, IdentityLookup, StaticLookup}, - Perbill, -}; -use std::cell::RefCell; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; -type Balance = u64; -type AccountId = u64; - -frame_support::construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - Assets: pallet_assets::{Pallet, Call, Storage, Event}, - Authorship: pallet_authorship::{Pallet, Call, Storage}, - AssetTxPayment: pallet_asset_tx_payment::{Pallet}, - } -); - -const CALL: &::Call = - &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); - -thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); -} - -pub struct BlockWeights; -impl Get for BlockWeights { - fn get() -> frame_system::limits::BlockWeights { - frame_system::limits::BlockWeights::builder() - .base_block(0) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); - }) - .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = 1024.into(); - }) - .build_or_panic() - } -} - -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub static TransactionByteFee: u64 = 1; - pub static WeightToFee: u64 = 1; -} - -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = BlockWeights; - type BlockLength = (); - type DbWeight = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = Call; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! 
{ - pub const ExistentialDeposit: u64 = 10; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - type Balance = Balance; - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type MaxLocks = (); - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; -} - -impl WeightToFeePolynomial for WeightToFee { - type Balance = u64; - - fn polynomial() -> WeightToFeeCoefficients { - smallvec![WeightToFeeCoefficient { - degree: 1, - coeff_frac: Perbill::zero(), - coeff_integer: WEIGHT_TO_FEE.with(|v| *v.borrow()), - negative: false, - }] - } -} - -parameter_types! { - pub const OperationalFeeMultiplier: u8 = 5; -} - -impl pallet_transaction_payment::Config for Runtime { - type OnChargeTransaction = CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = WeightToFee; - type FeeMultiplierUpdate = (); - type OperationalFeeMultiplier = OperationalFeeMultiplier; -} - -parameter_types! { - pub const AssetDeposit: u64 = 2; - pub const MetadataDeposit: u64 = 0; - pub const StringLimit: u32 = 20; -} - -impl pallet_assets::Config for Runtime { - type Event = Event; - type Balance = Balance; - type AssetId = u32; - type Currency = Balances; - type ForceOrigin = EnsureRoot; - type AssetDeposit = AssetDeposit; - type MetadataDepositBase = MetadataDeposit; - type MetadataDepositPerByte = MetadataDeposit; - type ApprovalDeposit = MetadataDeposit; - type StringLimit = StringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = (); -} - -pub struct HardcodedAuthor; -const BLOCK_AUTHOR: AccountId = 1234; -impl FindAuthor for HardcodedAuthor { - fn find_author<'a, I>(_: I) -> Option - where - I: 'a + IntoIterator, - { - Some(BLOCK_AUTHOR) - } -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = HardcodedAuthor; - type UncleGenerations = (); - type FilterUncle = (); - type EventHandler = (); -} - -pub struct CreditToBlockAuthor; -impl HandleCredit for CreditToBlockAuthor { - fn handle_credit(credit: CreditOf) { - let author = pallet_authorship::Pallet::::author(); - // What to do in case paying the author fails (e.g. because `fee < min_balance`) - // default: drop the result which will trigger the `OnDrop` of the imbalance. 
-		let _ = <Assets as Balanced<AccountId>>::resolve(&author, credit);
-	}
-}
-
-impl Config for Runtime {
-	type Fungibles = Assets;
-	type OnChargeAssetTransaction = FungiblesAdapter<
-		pallet_assets::BalanceToAssetBalance<Balances, Runtime, ConvertInto>,
-		CreditToBlockAuthor,
-	>;
-}
-
-pub struct ExtBuilder {
-	balance_factor: u64,
-	base_weight: u64,
-	byte_fee: u64,
-	weight_to_fee: u64,
-}
-
-impl Default for ExtBuilder {
-	fn default() -> Self {
-		Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 }
-	}
-}
-
-impl ExtBuilder {
-	pub fn base_weight(mut self, base_weight: u64) -> Self {
-		self.base_weight = base_weight;
-		self
-	}
-	pub fn balance_factor(mut self, factor: u64) -> Self {
-		self.balance_factor = factor;
-		self
-	}
-	fn set_constants(&self) {
-		EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight);
-		TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee);
-		WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee);
-	}
-	pub fn build(self) -> sp_io::TestExternalities {
-		self.set_constants();
-		let mut t = frame_system::GenesisConfig::default().build_storage::<Runtime>().unwrap();
-		pallet_balances::GenesisConfig::<Runtime> {
-			balances: if self.balance_factor > 0 {
-				vec![
-					(1, 10 * self.balance_factor),
-					(2, 20 * self.balance_factor),
-					(3, 30 * self.balance_factor),
-					(4, 40 * self.balance_factor),
-					(5, 50 * self.balance_factor),
-					(6, 60 * self.balance_factor),
-				]
-			} else {
-				vec![]
-			},
-		}
-		.assimilate_storage(&mut t)
-		.unwrap();
-		t.into()
-	}
-}
-
-/// create a transaction info struct from weight. Handy to avoid building the whole struct.
-pub fn info_from_weight(w: Weight) -> DispatchInfo {
-	// pays_fee: Pays::Yes -- class: DispatchClass::Normal
-	DispatchInfo { weight: w, ..Default::default() }
-}
-
-fn post_info_from_weight(w: Weight) -> PostDispatchInfo {
-	PostDispatchInfo { actual_weight: Some(w), pays_fee: Default::default() }
-}
-
-fn info_from_pays(p: Pays) -> DispatchInfo {
-	DispatchInfo { pays_fee: p, ..Default::default() }
-}
-
-fn post_info_from_pays(p: Pays) -> PostDispatchInfo {
-	PostDispatchInfo { actual_weight: None, pays_fee: p }
-}
-
-fn default_post_info() -> PostDispatchInfo {
-	PostDispatchInfo { actual_weight: None, pays_fee: Default::default() }
-}
-
-#[test]
-fn transaction_payment_in_native_possible() {
-	let balance_factor = 100;
-	ExtBuilder::default()
-		.balance_factor(balance_factor)
-		.base_weight(5)
-		.build()
-		.execute_with(|| {
-			let len = 10;
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, None)
-				.pre_dispatch(&1, CALL, &info_from_weight(5), len)
-				.unwrap();
-			let initial_balance = 10 * balance_factor;
-			assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(5),
-				&default_post_info(),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10);
-
-			let pre = ChargeAssetTxPayment::<Runtime>::from(5 /* tipped */, None)
-				.pre_dispatch(&2, CALL, &info_from_weight(100), len)
-				.unwrap();
-			let initial_balance_for_2 = 20 * balance_factor;
-			assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(100),
-				&post_info_from_weight(50),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 5);
-		});
-}
-
-#[test]
-fn transaction_payment_in_asset_possible() {
-	let base_weight = 5;
-	let balance_factor = 100;
-	ExtBuilder::default()
-		.balance_factor(balance_factor)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 2;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 1;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 5;
-			let len = 10;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get();
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.unwrap();
-			// assert that native balance is not used
-			assert_eq!(Balances::free_balance(caller), 10 * balance_factor);
-			// check that fee was charged in the given asset
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee);
-			assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&default_post_info(),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee);
-			// check that the block author gets rewarded
-			assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), fee);
-		});
-}
-
-#[test]
-fn transaction_payment_without_fee() {
-	let base_weight = 5;
-	let balance_factor = 100;
-	ExtBuilder::default()
-		.balance_factor(balance_factor)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 2;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 1;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 5;
-			let len = 10;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get();
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.unwrap();
-			// assert that native balance is not used
-			assert_eq!(Balances::free_balance(caller), 10 * balance_factor);
-			// check that fee was charged in the given asset
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee);
-			assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&post_info_from_pays(Pays::No),
-				len,
-				&Ok(())
-			));
-			// caller should be refunded
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			// check that the block author did not get rewarded
-			assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0);
-		});
-}
-
-#[test]
-fn asset_transaction_payment_with_tip_and_refund() {
-	let base_weight = 5;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 2;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 2;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 1000;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 100;
-			let tip = 5;
-			let len = 10;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee_with_tip =
-				(base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get();
-			let pre = ChargeAssetTxPayment::<Runtime>::from(tip, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.unwrap();
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip);
-
-			let final_weight = 50;
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&post_info_from_weight(final_weight),
-				len,
-				&Ok(())
-			));
-			let final_fee =
-				fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get();
-			assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee));
-			assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee);
-		});
-}
-
-#[test]
-fn payment_from_account_with_only_assets() {
-	let base_weight = 5;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 2;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 333;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			// assert that native balance is not necessary
-			assert_eq!(Balances::free_balance(caller), 0);
-			let weight = 5;
-			let len = 10;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get();
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.unwrap();
-			assert_eq!(Balances::free_balance(caller), 0);
-			// check that fee was charged in the given asset
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&default_post_info(),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Assets::balance(asset_id, caller), balance - fee);
-			assert_eq!(Balances::free_balance(caller), 0);
-		});
-}
-
-#[test]
-fn payment_only_with_existing_sufficient_asset() {
-	let base_weight = 5;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			let asset_id = 1;
-			let caller = 1;
-			let weight = 5;
-			let len = 10;
-			// pre_dispatch fails for non-existent asset
-			assert!(ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.is_err());
-
-			// create the non-sufficient asset
-			let min_balance = 2;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				false, /* is_sufficient */
-				min_balance
-			));
-			// pre_dispatch fails for non-sufficient asset
-			assert!(ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.is_err());
-		});
-}
-
-#[test]
-fn converted_fee_is_never_zero_if_input_fee_is_not() {
-	let base_weight = 1;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 1;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 333;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 1;
-			let len = 1;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get();
-			// naive fee calculation would round down to zero
-			assert_eq!(fee, 0);
-			{
-				let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-					.pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len)
-					.unwrap();
-				// `Pays::No` still implies no fees
-				assert_eq!(Assets::balance(asset_id, caller), balance);
-
-				assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-					pre,
-					&info_from_pays(Pays::No),
-					&post_info_from_pays(Pays::No),
-					len,
-					&Ok(())
-				));
-				assert_eq!(Assets::balance(asset_id, caller), balance);
-			}
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_weight(weight), len)
-				.unwrap();
-			// check that at least one coin was charged in the given asset
-			assert_eq!(Assets::balance(asset_id, caller), balance - 1);
-
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&default_post_info(),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Assets::balance(asset_id, caller), balance - 1);
-		});
-}
-
-#[test]
-fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() {
-	let base_weight = 1;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 100;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 333;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 1;
-			let len = 1;
-			// we convert the from weight to fee based on the ratio between asset min balance and
-			// existential deposit
-			let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get();
-			// calculated fee is greater than 0
-			assert!(fee > 0);
-			let pre = ChargeAssetTxPayment::<Runtime>::from(0, Some(asset_id))
-				.pre_dispatch(&caller, CALL, &info_from_pays(Pays::No), len)
-				.unwrap();
-			// `Pays::No` implies no pre-dispatch fees
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let (_tip, _who, initial_payment) = &pre;
-			let not_paying = match initial_payment {
-				&InitialPayment::Nothing => true,
-				_ => false,
-			};
-			assert!(not_paying, "initial payment should be Nothing if we pass Pays::No");
-
-			// `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the
-			// initial fee)
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_pays(Pays::No),
-				&post_info_from_pays(Pays::Yes),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-		});
-}
-
-#[test]
-fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() {
-	let base_weight = 1;
-	ExtBuilder::default()
-		.balance_factor(100)
-		.base_weight(base_weight)
-		.build()
-		.execute_with(|| {
-			// create the asset
-			let asset_id = 1;
-			let min_balance = 100;
-			assert_ok!(Assets::force_create(
-				Origin::root(),
-				asset_id,
-				42, /* owner */
-				true, /* is_sufficient */
-				min_balance
-			));
-
-			// mint into the caller account
-			let caller = 333;
-			let beneficiary = <Runtime as system::Config>::Lookup::unlookup(caller);
-			let balance = 100;
-			assert_ok!(Assets::mint_into(asset_id, &beneficiary, balance));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let weight = 1;
-			let len = 1;
-			let pre = ChargeAssetTxPayment::<Runtime>::pre_dispatch_unsigned(
-				CALL,
-				&info_from_weight(weight),
-				len,
-			)
-			.unwrap();
-
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-			let (_tip, _who, initial_payment) = &pre;
-			let not_paying = match initial_payment {
-				&InitialPayment::Nothing => true,
-				_ => false,
-			};
-			assert!(not_paying, "initial payment is Nothing for unsigned extrinsics");
-
-			// `Pays::Yes` on post-dispatch does not mean we pay (we never charge more than the
-			// initial fee)
-			assert_ok!(ChargeAssetTxPayment::<Runtime>::post_dispatch(
-				pre,
-				&info_from_weight(weight),
-				&post_info_from_pays(Pays::Yes),
-				len,
-				&Ok(())
-			));
-			assert_eq!(Assets::balance(asset_id, caller), balance);
-		});
-}

From 4fb8862ac2cb151b7351004995fd3235d38ade1a Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Fri, 14 Oct 2022 12:21:25 +0300
Subject: [PATCH 157/162] Fix construct runtime

---
 Cargo.lock                  |  1 -
 bin/node/runtime/src/lib.rs | 10 +++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 45cefc99ea284..781250e3ebb37 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4714,7 +4714,6 @@ dependencies = [
  "hex-literal",
  "log 0.4.14",
  "node-primitives",
- "pallet-assets",
  "pallet-authority-discovery",
  "pallet-authorship",
  "pallet-babe",
diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 584b6aa8bc1ee..1effacb0689a4 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -1239,11 +1239,11 @@ construct_runtime!(
 		Bounties: pallet_bounties,
 		Tips: pallet_tips,
 		BagsList: pallet_bags_list,
-		CereDDCModule: pallet_cere_ddc,
-		ChainBridge: pallet_chainbridge,
-		Erc721: pallet_erc721,
-		Erc20: pallet_erc20,
-		DdcMetricsOffchainWorker: pallet_ddc_metrics_offchain_worker,
+		CereDDCModule: pallet_cere_ddc::{Pallet, Call, Storage, Event<T>},
+		ChainBridge: pallet_chainbridge::{Pallet, Call, Storage, Event<T>},
+		Erc721: pallet_erc721::{Pallet, Call, Storage, Event<T>},
+		Erc20: pallet_erc20::{Pallet, Call, Storage, Event<T>},
+		DdcMetricsOffchainWorker: pallet_ddc_metrics_offchain_worker::{Pallet, Call, Storage, Event<T>},
 	}
 );
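A note on the fix above: `construct_runtime!` accepts two declaration styles. A pallet built with the current `#[frame_support::pallet]` attribute macro can be listed as a bare `Name: path` and the macro discovers its parts itself; the five Cere pallets are moved back to the explicit part list, presumably because, living in submodules on an older FRAME macro, they do not expose the metadata the implicit form expands. The sketch below contrasts the two styles; it is illustrative only and not part of any patch in this series.

    // Sketch, not from this patch series: the two pallet declaration styles.
    construct_runtime!(
        pub enum Runtime where
            Block = Block,
            NodeBlock = Block,
            UncheckedExtrinsic = UncheckedExtrinsic,
        {
            // Implicit parts: the macro queries the pallet for its parts;
            // requires a pallet written with `#[frame_support::pallet]`.
            Balances: pallet_balances,
            // Explicit parts: spelled out by hand, as the patch above does
            // for the Cere pallets.
            Erc20: pallet_erc20::{Pallet, Call, Storage, Event<T>},
        }
    );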
From fbc1e202374620c2805c34daeb2dabacab3a1c15 Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Fri, 14 Oct 2022 13:07:55 +0300
Subject: [PATCH 158/162] Increment spec version and add release notes

---
 CHANGELOG.md                | 4 ++++
 bin/node/runtime/src/lib.rs | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3dde70c54ff9c..b4e99092b4056 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [2.26.0]
+### Changed
+- Updated Substrate to polkadot-v0.9.13
+
 ## [2.25.0]
 ### Changed
 - Updated Substrate to polkadot-v0.9.12
diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 1effacb0689a4..07aab58d6a49f 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -127,7 +127,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 	// and set impl_version to 0. If only runtime
 	// implementation changes and behavior does not, then leave spec_version as
 	// is and increment impl_version.
-	spec_version: 296,
+	spec_version: 297,
 	impl_version: 0,
 	apis: RUNTIME_API_VERSIONS,
 	transaction_version: 2,
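For context on the bump above: the comment preserved in the hunk states the policy the patch follows, so behavioral changes raise `spec_version` while implementation-only changes raise `impl_version`. A trimmed sketch of the declaration being edited follows; only `spec_version: 297` is taken from the patch, the remaining field values are upstream Substrate's and shown purely for illustration (the Cere runtime's actual `spec_name`, `impl_name`, and versions may differ).

    pub const VERSION: RuntimeVersion = RuntimeVersion {
        spec_name: create_runtime_str!("node"),           // illustrative value
        impl_name: create_runtime_str!("substrate-node"), // illustrative value
        authoring_version: 10,                            // illustrative value
        // Bumped because runtime behavior changed; nodes compare this value
        // when deciding whether a stored Wasm runtime supersedes the native one.
        spec_version: 297,
        impl_version: 0, // reset to 0 whenever spec_version is bumped
        apis: RUNTIME_API_VERSIONS,
        transaction_version: 2,
    };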
version = "4.0.0-dev", default-features = false, path = "../../../primitives/npos-elections" } sp-io = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } # frame dependencies frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } From 7b66a8682f6595f23a25124791b3b14696e3fede Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Thu, 20 Oct 2022 10:46:31 +0300 Subject: [PATCH 161/162] Amend release versions --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4e99092b4056..eca1809aafac1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,11 +5,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [2.26.0] +## [2.27.0] ### Changed - Updated Substrate to polkadot-v0.9.13 -## [2.25.0] +## [2.26.0] ### Changed - Updated Substrate to polkadot-v0.9.12 From 18d6a14a4a46d84851fbc96a2eb24759c7a4f25d Mon Sep 17 00:00:00 2001 From: Maksim Ramanenkau Date: Tue, 22 Nov 2022 15:47:39 +0300 Subject: [PATCH 162/162] Add storage migrations --- bin/node/runtime/src/lib.rs | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 07aab58d6a49f..60186fbd67bed 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -27,7 +27,7 @@ use frame_support::{ construct_runtime, parameter_types, traits::{ Currency, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, OnRuntimeUpgrade, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, @@ -1284,9 +1284,31 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPallets, - pallet_bags_list::migrations::CheckCounterPrefix, + ( + pallet_bags_list::migrations::CheckCounterPrefix, + StakingBagsListMigrationV8, + ) >; +// Migration to generate pallet staking's `SortedListProvider` from pre-existing nominators. +pub struct StakingBagsListMigrationV8; + +impl OnRuntimeUpgrade for StakingBagsListMigrationV8 { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + pallet_staking::migrations::v8::migrate::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + pallet_staking::migrations::v8::pre_migrate::() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + pallet_staking::migrations::v8::post_migrate::() + } +} + impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion {