From 9476d2157a0d266677dee144a8a7bc44a6b821e1 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Sat, 24 Sep 2022 17:17:44 +0200 Subject: [PATCH 01/75] Add base-weight to `System::Extrinsic*` events (#12329) * Add base-weight to events Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- bin/node/executor/tests/basic.rs | 13 +- frame/system/src/extensions/check_weight.rs | 12 +- frame/system/src/lib.rs | 8 +- frame/system/src/mock.rs | 1 + frame/system/src/tests.rs | 312 +++++++++++--------- 5 files changed, 198 insertions(+), 148 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 99a9b83596acf..fc4e138faafc2 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -311,10 +311,19 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - let transfer_weight = default_transfer_call().get_dispatch_info().weight; + let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic, + ); let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight; + .weight + .saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Mandatory) + .base_extrinsic, + ); executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 466231b8455ec..15a88913cd337 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -342,7 +342,7 @@ mod tests { .get(DispatchClass::Operational) .max_total .unwrap_or_else(|| weights.max_block); - let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; + let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; let weight = operational_limit - base_weight; let okay = @@ -378,11 +378,11 @@ mod tests { // Max normal is 768 (75%) // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) - // And Operational can be 256 to produce a full block (-5 for base) + // And Operational can be 246 to produce a full block (-10 for base) let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; @@ -406,7 +406,7 @@ mod tests { let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; @@ -414,7 +414,7 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - // Extra 15 here from block execution + base extrinsic weight + // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); @@ -433,7 +433,7 @@ mod tests { ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: 
Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 36360f8fae2c2..dc74157da79de 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1509,9 +1509,15 @@ impl Pallet { } /// To be called immediately after an extrinsic has been applied. + /// + /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. + /// The emitted event contains the post-dispatch corrected weight including + /// the base-weight for its dispatch class. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info); + info.weight = extract_actual_weight(r, &info) + .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); info.pays_fee = extract_actual_pays_fee(r, &info); + Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 1c0511787eb76..b6fc121612050 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -67,6 +67,7 @@ parameter_types! { weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); }) .for_class(DispatchClass::Operational, |weights| { + weights.base_extrinsic = Weight::from_ref_time(10); weights.max_total = Some(MAX_BLOCK_WEIGHT); weights.reserved = Some( MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 92563f4ad1747..c42131c450228 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -169,6 +169,10 @@ fn deposit_event_should_work() { }] ); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + System::reset_events(); System::initialize(&2, &[0u8; 32].into(), &Default::default()); System::deposit_event(SysEvent::NewAccount { account: 32 }); @@ -194,14 +198,17 @@ fn deposit_event_should_work() { }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { dispatch_info: Default::default() }.into(), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + } + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: Default::default() + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } } .into(), topics: vec![] @@ -223,6 +230,9 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); @@ -267,144 +277,168 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { }), pre_info, ); + // Also works for operational. 
+ let operational_base = ::BlockWeights::get() + .get(DispatchClass::Operational) + .base_extrinsic; + assert!(normal_base != operational_base, "Test pre-condition violated"); + let pre_info = DispatchInfo { + weight: Weight::from_ref_time(1000), + class: DispatchClass::Operational, + ..Default::default() + }; + System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(300), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(3), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(4), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(5), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(6), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(500), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(7), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(999), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(8), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(9), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(10), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - ] - ); + let got = System::events(); + let want = vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], 
+ }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(500).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(999).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(11), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(operational_base), + class: DispatchClass::Operational, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + ]; + for (i, event) in want.into_iter().enumerate() { + assert_eq!(got[i], event, "Event mismatch at index {}", i); + } }); } From d0214e7c77de639d60301d682ced7e155def15da Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sat, 24 Sep 2022 21:32:35 +0200 Subject: [PATCH 02/75] re add the migration checks for staking 
(#12330) Co-authored-by: parity-processbot <> --- frame/staking/src/migrations.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index f47545af694cf..8f37ae30dd056 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -35,6 +35,11 @@ pub mod v12 { impl OnRuntimeUpgrade for MigrateToV12 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { + frame_support::ensure!( + StorageVersion::::get() == Releases::V11_0_0, + "Expected v11 before upgrading to v12" + ); + frame_support::ensure!( T::HistoryDepth::get() == HistoryDepth::::get(), "Provided value of HistoryDepth should be same as the existing storage value" @@ -129,6 +134,11 @@ pub mod v11 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V11_0_0, + "wrong version after the upgrade" + ); + let old_pallet_name = N::get(); let new_pallet_name =
<P as PalletInfoAccess>
::name(); From 4219b3ab45ff2201eb8aecbbb444f9ae055f528e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20M=C3=BCller?= Date: Sun, 25 Sep 2022 11:24:35 +0200 Subject: [PATCH 03/75] Allow specifying immediate finalize for `manual-seal` (#12106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexander Theißen --- client/consensus/manual-seal/src/lib.rs | 54 +++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index c5dd169e281f2..4672e7275a56b 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -247,6 +247,60 @@ pub async fn run_instant_seal( .await } +/// Runs the background authorship task for the instant seal engine. +/// instant-seal creates a new block for every transaction imported into +/// the transaction pool. +/// +/// This function will finalize the block immediately as well. If you don't +/// want this behavior use `run_instant_seal` instead. +pub async fn run_instant_seal_and_finalize( + InstantSealParams { + block_import, + env, + client, + pool, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, +{ + // Creates and finalizes blocks as soon as transactions are imported + // into the transaction pool. + let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: true, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await +} + #[cfg(test)] mod tests { use super::*; From badc92ac20dbf006595a6af9418da9942527cbd7 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sun, 25 Sep 2022 23:22:54 +0200 Subject: [PATCH 04/75] Removed OuterCall alias & doc fixes (#12349) --- frame/alliance/src/lib.rs | 2 +- frame/collective/src/lib.rs | 6 +++--- .../election-provider-multi-phase/src/unsigned.rs | 14 +++++++------- frame/multisig/src/benchmarking.rs | 2 +- frame/ranked-collective/src/lib.rs | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 82c7e21dba3af..2ef6718538122 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -243,7 +243,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// The outer call dispatch type. + /// The runtime call dispatch type. type Proposal: Parameter + Dispatchable + From> diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index e934924033552..ae68ae2fe3e16 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -180,10 +180,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - /// The outer origin type. + /// The runtime origin type. type RuntimeOrigin: From>; - /// The outer call dispatch type. + /// The runtime call dispatch type. 
type Proposal: Parameter + Dispatchable< RuntimeOrigin = >::RuntimeOrigin, @@ -191,7 +191,7 @@ pub mod pallet { > + From> + GetDispatchInfo; - /// The outer event type. + /// The runtime event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index cf8df237bafb0..833f80c90d13e 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -856,7 +856,7 @@ mod tests { use crate::{ mock::{ roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall as OuterCall, RuntimeOrigin, System, + MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, UnsignedPhase, }, CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, @@ -1070,8 +1070,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: witness(), }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1096,8 +1096,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: correct_witness, }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1560,7 +1560,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); let call = extrinsic.call; - assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned { .. }))); + assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1577,7 +1577,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); let call = match extrinsic.call { - OuterCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, + RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 8d0651002305b..c0b0097b07236 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -41,7 +41,7 @@ fn setup_multi( signatories.push(signatory); } signatories.sort(); - // Must first convert to outer call type. + // Must first convert to runtime call type. let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_data = OpaqueCall::::from_encoded(call.encode()); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index bdd5d26373980..fa3a473fe7d73 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -361,7 +361,7 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// The outer event type. + /// The runtime event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent> From fbd7e5aa99e20b7019576ecf598bca2d7019cddd Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Mon, 26 Sep 2022 08:05:05 +0200 Subject: [PATCH 05/75] [Enhancement] Remove optional Pool subscription from fast-unstake (#12344) * [Enhancement] Remove optional Pool subscription from fast-unstake * remove nomination-pools pallet dependency * fixes * more fixes * more fixes * more fixes --- Cargo.lock | 1 - frame/fast-unstake/Cargo.toml | 2 - frame/fast-unstake/src/benchmarking.rs | 30 +-- frame/fast-unstake/src/lib.rs | 95 ++------ frame/fast-unstake/src/mock.rs | 50 +--- frame/fast-unstake/src/tests.rs | 302 +++++++------------------ frame/fast-unstake/src/types.rs | 3 - 7 files changed, 105 insertions(+), 378 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74784eb9a6c24..6ea79a120361e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5729,7 +5729,6 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 1fa118dba4a8d..69aeaff35993c 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -28,7 +28,6 @@ sp-staking = { default-features = false, path = "../../primitives/staking" } pallet-balances = { default-features = false, path = "../balances" } pallet-timestamp = { default-features = false, path = "../timestamp" } pallet-staking = { default-features = false, path = "../staking" } -pallet-nomination-pools = { default-features = false, path = "../nomination-pools" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } @@ -55,7 +54,6 @@ std = [ "sp-std/std", "pallet-staking/std", - "pallet-nomination-pools/std", "pallet-balances/std", "pallet-timestamp/std", "frame-election-provider-support/std", diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 68a3da0d40af3..5690d5ce6f29f 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -26,7 +26,6 @@ use frame_support::{ traits::{Currency, EnsureOrigin, Get, Hooks}, }; use frame_system::RawOrigin; -use pallet_nomination_pools::{Pallet as Pools, PoolId}; use pallet_staking::Pallet as Staking; use sp_runtime::traits::{StaticLookup, Zero}; use sp_staking::EraIndex; @@ -76,25 +75,6 @@ pub(crate) fn fast_unstake_events() -> Vec> { .collect::>() } -fn setup_pool() -> PoolId { - let depositor = frame_benchmarking::account::("depositor_42", 0, USER_SEED); - let depositor_lookup = l::(depositor.clone()); - - let stake = Pools::::depositor_min_bond(); - CurrencyOf::::make_free_balance_be(&depositor, stake * 10u32.into()); - - Pools::::create( - RawOrigin::Signed(depositor.clone()).into(), - stake, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup, - ) - .unwrap(); - - pallet_nomination_pools::LastPoolId::::get() -} - fn setup_staking(v: u32, until: EraIndex) { let ed = CurrencyOf::::minimum_balance(); @@ -131,10 +111,8 @@ benchmarks! { // on_idle, we don't check anyone, but fully unbond and move them to another pool. 
on_idle_unstake { let who = create_unexposed_nominator::(); - let pool_id = setup_pool::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - Some(pool_id) )); ErasToCheckPerBlock::::put(1); @@ -143,7 +121,7 @@ benchmarks! { on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), maybe_pool_id: Some(pool_id) }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) ); } : { @@ -172,7 +150,6 @@ benchmarks! { let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None, )); // no one is queued thus far. @@ -185,7 +162,7 @@ benchmarks! { let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked, maybe_pool_id: None }) + Some(UnstakeRequest { stash: who.clone(), checked }) ); assert!(matches!( fast_unstake_events::().last(), @@ -199,7 +176,7 @@ benchmarks! { assert_eq!(Queue::::count(), 0); } - :_(RawOrigin::Signed(who.clone()), None) + :_(RawOrigin::Signed(who.clone())) verify { assert_eq!(Queue::::count(), 1); } @@ -208,7 +185,6 @@ benchmarks! { let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None )); assert_eq!(Queue::::count(), 1); whitelist_account!(who); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 51416808f48c8..5acc9940debf1 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -19,8 +19,7 @@ //! //! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any //! validators in the last `BondingDuration` days"), then they can register themselves in this -//! pallet, unstake faster than having to wait an entire bonding duration, and potentially move -//! into a nomination pool. +//! pallet, unstake faster than having to wait an entire bonding duration. //! //! Appearing in the exposure of a validator means being exposed equal to that validator from the //! point of view of the staking system. This usually means earning rewards with the validator, and @@ -43,8 +42,7 @@ //! to prevent them from accidentally exposing themselves behind a validator etc. //! //! Once processed, if successful, no additional fee for the checking process is taken, and the -//! staker is instantly unbonded. Optionally, if they have asked to join a pool, their *entire* -//! stake is joined into their pool of choice. +//! staker is instantly unbonded. //! //! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration` eras //! they will end up being slashed for the amount of wasted work they have inflicted on the chain. @@ -85,7 +83,6 @@ pub mod pallet { use frame_election_provider_support::ElectionProvider; use frame_support::pallet_prelude::*; use frame_system::{pallet_prelude::*, RawOrigin}; - use pallet_nomination_pools::PoolId; use pallet_staking::Pallet as Staking; use sp_runtime::{ traits::{Saturating, Zero}, @@ -109,12 +106,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: - frame_system::Config - + pallet_staking::Config< - CurrencyBalance = ::CurrencyBalance, - > + pallet_nomination_pools::Config - { + pub trait Config: frame_system::Config + pallet_staking::Config { /// The overarching event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent> @@ -139,10 +131,9 @@ pub mod pallet { /// The map of all accounts wishing to be unstaked. /// - /// Points the `AccountId` wishing to unstake to the optional `PoolId` they wish to join - /// thereafter. + /// Keeps track of `AccountId` wishing to unstake. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, Option>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; /// Number of eras to check per block. /// @@ -158,7 +149,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A staker was unstaked. - Unstaked { stash: T::AccountId, maybe_pool_id: Option, result: DispatchResult }, + Unstaked { stash: T::AccountId, result: DispatchResult }, /// A staker was slashed for requesting fast-unstake whilst being exposed. Slashed { stash: T::AccountId, amount: BalanceOf }, /// A staker was partially checked for the given eras, but the process did not finish. @@ -213,16 +204,13 @@ pub mod pallet { /// they are guaranteed to remain eligible, because the call will chill them as well. /// /// If the check works, the entire staking data is removed, i.e. the stash is fully - /// unstaked, and they potentially join a pool with their entire bonded stake. + /// unstaked. /// /// If the check fails, the stash remains chilled and waiting for being unbonded as in with /// the normal staking system, but they lose part of their unbonding chunks due to consuming /// the chain's resources. #[pallet::weight(::WeightInfo::register_fast_unstake())] - pub fn register_fast_unstake( - origin: OriginFor, - maybe_pool_id: Option, - ) -> DispatchResult { + pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; let ledger = @@ -243,12 +231,11 @@ pub mod pallet { Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; // enqueue them. - Queue::::insert(ledger.stash, maybe_pool_id); + Queue::::insert(ledger.stash, ()); Ok(()) } - /// Deregister oneself from the fast-unstake (also cancels joining the pool if that was - /// supplied on `register_fast_unstake` . + /// Deregister oneself from the fast-unstake. /// /// This is useful if one is registered, they are still waiting, and they change their mind. /// @@ -327,17 +314,12 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked, maybe_pool_id } = match Head::::take() - .or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, maybe_pool_id)| UnstakeRequest { - stash, - maybe_pool_id, - checked: Default::default(), - }) - .next() - }) { + let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) + .next() + }) { None => { // There's no `Head` and nothing in the `Queue`, nothing to do here. return T::DbWeight::get().reads(4) @@ -392,48 +374,15 @@ pub mod pallet { // `stash` is not exposed in any era now -- we can let go of them now. 
let num_slashing_spans = Staking::::slashing_spans(&stash).iter().count() as u32; - let ctrl = match pallet_staking::Bonded::::get(&stash) { - Some(ctrl) => ctrl, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let ledger = match pallet_staking::Ledger::::get(ctrl) { - Some(ledger) => ledger, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let unstake_result = pallet_staking::Pallet::::force_unstake( + let result = pallet_staking::Pallet::::force_unstake( RawOrigin::Root.into(), stash.clone(), num_slashing_spans, ); - let pool_stake_result = if let Some(pool_id) = maybe_pool_id { - pallet_nomination_pools::Pallet::::join( - RawOrigin::Signed(stash.clone()).into(), - ledger.total, - pool_id, - ) - } else { - Ok(()) - }; + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); - let result = unstake_result.and(pool_stake_result); - log!( - info, - "unstaked {:?}, maybe_pool {:?}, outcome: {:?}", - stash, - maybe_pool_id, - result - ); - - Self::deposit_event(Event::::Unstaked { stash, maybe_pool_id, result }); + Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -471,11 +420,7 @@ pub mod pallet { // Not exposed in these eras. match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { - stash: stash.clone(), - checked, - maybe_pool_id, - }); + Head::::put(UnstakeRequest { stash: stash.clone(), checked }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index b9cf16e18e8d1..62f343709e245 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -17,19 +17,10 @@ use crate::{self as fast_unstake}; use frame_support::{ - assert_ok, - pallet_prelude::*, - parameter_types, - traits::{ConstU64, ConstU8, Currency}, - weights::constants::WEIGHT_PER_SECOND, - PalletId, -}; -use sp_runtime::{ - traits::{Convert, IdentityLookup}, - FixedU128, + pallet_prelude::*, parameter_types, traits::ConstU64, weights::constants::WEIGHT_PER_SECOND, }; +use sp_runtime::traits::{Convert, IdentityLookup}; -use frame_system::RawOrigin; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::prelude::*; @@ -153,7 +144,7 @@ impl pallet_staking::Config for Runtime { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type OnStakerSlash = Pools; + type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -172,29 +163,6 @@ impl Convert for U256ToBalance { } } -parameter_types! 
{ - pub const PostUnbondingPoolsWindow: u32 = 10; - pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub static MaxMetadataLen: u32 = 10; - pub static CheckLevel: u8 = 255; -} - -impl pallet_nomination_pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - type Currency = Balances; - type CurrencyBalance = Balance; - type RewardCounter = FixedU128; - type BalanceToU256 = BalanceToU256; - type U256ToBalance = U256ToBalance; - type StakingInterface = Staking; - type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; - type MaxMetadataLen = MaxMetadataLen; - type MaxUnbonding = ConstU32<8>; - type MaxPointsToBalance = ConstU8<10>; - type PalletId = PoolsPalletId; -} - parameter_types! { pub static SlashPerEra: u32 = 100; } @@ -218,7 +186,6 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp, Balances: pallet_balances, Staking: pallet_staking, - Pools: pallet_nomination_pools, FastUnstake: fast_unstake, } ); @@ -287,10 +254,6 @@ impl ExtBuilder { let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // create one default pool. - let _ = pallet_nomination_pools::GenesisConfig:: { ..Default::default() } - .assimilate_storage(&mut storage); - let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; let nominators_range = NOMINATOR_PREFIX..NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA; @@ -337,11 +300,6 @@ impl ExtBuilder { // because we read this value as a measure of how many validators we have. pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); - - // make a pool - let amount_to_bond = Pools::depositor_min_bond(); - Balances::make_free_balance_be(&10, amount_to_bond * 5); - assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); }); ext } @@ -359,14 +317,12 @@ pub(crate) fn run_to_block(n: u64, on_idle: bool) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Staking::on_finalize(System::block_number()); - Pools::on_finalize(System::block_number()); FastUnstake::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); Balances::on_initialize(System::block_number()); Staking::on_initialize(System::block_number()); - Pools::on_initialize(System::block_number()); FastUnstake::on_initialize(System::block_number()); if on_idle { FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index a51c1acdf06eb..5586443ce797c 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -20,20 +20,15 @@ use super::*; use crate::{mock::*, types::*, weights::WeightInfo, Event}; use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; -use pallet_nomination_pools::{BondedPools, LastPoolId, RewardPools}; use pallet_staking::{CurrentEra, IndividualExposure, RewardDestination}; -use sp_runtime::{traits::BadOrigin, DispatchError, ModuleError}; +use sp_runtime::traits::BadOrigin; use sp_staking::StakingInterface; #[test] fn test_setup_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(BondedPools::::count(), 1); - assert_eq!(RewardPools::::count(), 1); assert_eq!(Staking::bonding_duration(), 3); - let last_pool = LastPoolId::::get(); - assert_eq!(last_pool, 1); }); } @@ -41,7 +36,7 @@ fn test_setup_works() { fn register_works() { ExtBuilder::default().build_and_execute(|| { // Controller account 
registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. assert_ne!(Queue::::get(1), None); }); } @@ -56,7 +51,7 @@ fn cannot_register_if_not_bonded() { } // Attempt to fast unstake. assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), Error::::NotController ); }); @@ -66,10 +61,10 @@ fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { // Insert some Queue item - Queue::::insert(1, Some(1_u32)); + Queue::::insert(1, ()); // Cannot re-register, already in queue assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyQueued ); }); @@ -79,10 +74,10 @@ fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to register assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyHead ); }); @@ -95,7 +90,7 @@ fn cannot_register_if_has_unlocking_chunks() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::NotFullyBonded ); }); @@ -105,7 +100,7 @@ fn deregister_works() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); // Ensure stash no longer exists in the queue. @@ -117,7 +112,7 @@ fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(1)), Error::::NotController); }); @@ -135,9 +130,9 @@ fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { // Controller registers for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. 
- Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -169,15 +164,15 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // set up Queue item - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_eq!(Queue::::get(1), Some(())); }); } @@ -189,8 +184,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -209,7 +204,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // when: another 1 era. @@ -225,11 +220,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when: then 5 eras, we only need 2 more. 
@@ -251,11 +242,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: not enough weight to unstake: @@ -267,11 +254,7 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -287,7 +270,7 @@ mod on_idle { // then we finish the unbonding: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }] + vec![Event::Unstaked { stash: 1, result: Ok(()) }] ); assert_eq!(Head::::get(), None,); @@ -302,11 +285,11 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -317,11 +300,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); assert_eq!(Queue::::count(), 4); @@ -338,11 +317,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 5, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }), + Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), ); assert_eq!(Queue::::count(), 3); @@ -350,7 +325,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 5, eras: vec![3, 2, 1, 0] } ] ); @@ -364,10 +339,10 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register multi accounts for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), Some(1))); - assert_eq!(Queue::::get(3), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_eq!(Queue::::get(3), Some(())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -397,9 +372,9 @@ mod on_idle { fast_unstake_events_since_last_call(), 
vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 3, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 3, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 3, result: Ok(()) }, ] ); @@ -409,14 +384,14 @@ mod on_idle { } #[test] - fn successful_unstake_without_pool_join() { + fn successful_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -427,11 +402,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -441,55 +412,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn successful_unstake_joining_bad_pool() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(0))); - assert_eq!(Queue::::get(1), Some(Some(0))); - - // process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(0) - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { - stash: 1, - maybe_pool_id: Some(0), - result: Err(DispatchError::Module(ModuleError { - index: 4, - error: [0, 0, 0, 0], - message: None - })) - } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -503,8 +426,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -515,11 +438,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -529,11 +448,10 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - 
assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -545,8 +463,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -557,40 +475,28 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -604,11 +510,10 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -623,39 +528,31 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: a new era happens right before one is free. @@ -670,7 +567,6 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. 
checked: bounded_vec![3, 2, 1, 4], - maybe_pool_id: None }) ); @@ -685,7 +581,7 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, Event::Checking { stash: 1, eras: vec![4] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -700,23 +596,19 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // process 2 blocks next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when @@ -726,21 +618,13 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // then we register a new era. @@ -752,22 +636,14 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) ); // but notice that we don't care about era 0 instead anymore! we're done. @@ -781,12 +657,11 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![4] }, Event::Checking { stash: 1, eras: vec![1] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -812,26 +687,18 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -872,17 +739,13 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. 
- assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -909,10 +772,7 @@ mod on_idle { RuntimeOrigin::signed(VALIDATOR_PREFIX), vec![VALIDATOR_PREFIX] )); - assert_ok!(FastUnstake::register_fast_unstake( - RuntimeOrigin::signed(VALIDATOR_PREFIX), - None - )); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); // but they indeed are exposed! assert!(pallet_staking::ErasStakers::::contains_key( @@ -943,17 +803,13 @@ mod on_idle { assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); // let them register: - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42))); // 2 block's enough to unstake them. next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 42, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -962,7 +818,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 42, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 42, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 42, result: Ok(()) } ] ); }); @@ -990,7 +846,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // then // stash can't. @@ -1010,7 +866,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); ErasToCheckPerBlock::::put(1); CurrentEra::::put(BondingDuration::get()); @@ -1018,7 +874,7 @@ mod signed_extension { assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // then diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index ae8702e56a842..e8d538dce4802 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -23,7 +23,6 @@ use frame_support::{ traits::{Currency, Get, IsSubType}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use pallet_nomination_pools::PoolId; use scale_info::TypeInfo; use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; @@ -42,8 +41,6 @@ pub struct UnstakeRequest, - /// The pool they wish to join, if any. 
- pub(crate) maybe_pool_id: Option, } #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] From a0ec652e341f694f182a3c5fcc80d4c3fb280003 Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Mon, 26 Sep 2022 15:46:59 +0800 Subject: [PATCH 06/75] Remove discarded blocks and states from database by default (#11983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 1.Add pruning param "canonical" in sc-cli. 2.Make PruningMode's default value to ArchiveCanonical. * Update tests in sc-state-db. * Update tests in sc-state-db. * 1.Add a new value `AllWithNonFinalized` in `enum BlocksPruning` which Corresponds to `blocks_pruning 0` in CLI . 2.Change value `All` to `AllFinalized` in `enum BlocksPruning` and make it to keep full finalized block history. * Make some corresponding adjustments based on the content in the conversation. * Update client/db/src/lib.rs Co-authored-by: Bastian Köcher * Apply suggestions from code review. * 1.Change `blocks_pruning` to be like `state_pruning` . * Fmt and add some doc. * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update doc. * Change `new_test_with_tx_storage` to take `BlocksPruning`. * Fmt Co-authored-by: Bastian Köcher --- bin/node/cli/benches/block_production.rs | 2 +- bin/node/cli/benches/transaction_pool.rs | 2 +- bin/node/testing/src/bench.rs | 2 +- client/cli/src/config.rs | 4 +- client/cli/src/params/pruning_params.rs | 31 +++- client/db/benches/state_access.rs | 2 +- client/db/src/lib.rs | 215 +++++++++++++++++++---- client/service/test/src/client/mod.rs | 4 +- client/service/test/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 5 +- 10 files changed, 211 insertions(+), 58 deletions(-) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 0a734fa447448..4fcebb123d9e3 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index e6084fba8242a..a8839642ddc26 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -68,7 +68,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 7980cc102fb38..59f1fa94c9b20 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -392,7 +392,7 @@ impl BenchDb { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: 
Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - blocks_pruning: sc_client_db::BlocksPruning::All, + blocks_pruning: sc_client_db::BlocksPruning::KeepAll, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index bc5941914de89..fad2ec7bc4a93 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -251,11 +251,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. Otherwise it's - /// `BlocksPruning::All`. + /// `BlocksPruning::KeepFinalized`. fn blocks_pruning(&self) -> Result { self.pruning_params() .map(|x| x.blocks_pruning()) - .unwrap_or_else(|| Ok(BlocksPruning::All)) + .unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized)) } /// Get the chain ID (string). diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 34a0982e63d95..b764e4722e94d 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -30,13 +30,16 @@ pub struct PruningParams { /// or for all of the canonical blocks (i.e. 'archive-canonical'). #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, - /// Specify the number of finalized blocks to keep in the database. + /// Specify the blocks pruning mode: a number of blocks to keep, or 'archive'. /// - /// Default is to keep all blocks. + /// Default is to keep all finalized blocks. + /// Otherwise, all blocks can be kept (i.e. 'archive'), + /// all canonical blocks can be kept (i.e. 'archive-canonical'), + /// or only the last N blocks can be kept (i.e. a number). /// /// NOTE: only finalized blocks are subject to removal! #[clap(alias = "keep-blocks", long, value_name = "COUNT")] - pub blocks_pruning: Option, + pub blocks_pruning: Option, } impl PruningParams { @@ -46,9 +49,12 @@ impl PruningParams { .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), + "archive-canonical" => Ok(PruningMode::ArchiveCanonical), bc => bc .parse() - .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) + .map_err(|_| { + error::Error::Input("Invalid state pruning mode specified".to_string()) + }) .map(PruningMode::blocks_pruning), }) .transpose() @@ -56,9 +62,18 @@ impl PruningParams { /// Get the block pruning value from the parameters pub fn blocks_pruning(&self) -> error::Result { - Ok(match self.blocks_pruning { - Some(n) => BlocksPruning::Some(n), - None => BlocksPruning::All, - }) + match self.blocks_pruning.as_ref() { + Some(bp) => match bp.as_str() { + "archive" => Ok(BlocksPruning::KeepAll), + "archive-canonical" => Ok(BlocksPruning::KeepFinalized), + bc => bc + .parse() + .map_err(|_| { + error::Error::Input("Invalid blocks pruning mode specified".to_string()) + }) + .map(BlocksPruning::Some), + }, + None => Ok(BlocksPruning::KeepFinalized), + } } } diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 78aed7858e342..714dda82d61b7 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -122,7 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend trie_cache_maximum_size, state_pruning: Some(PruningMode::ArchiveAll), source: DatabaseSource::ParityDb { path }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, }; Backend::new(settings, 100).expect("Creates backend") diff --git a/client/db/src/lib.rs
b/client/db/src/lib.rs index 79ef7e9b6625d..32c4c9ef85ed9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -320,10 +320,12 @@ pub struct DatabaseSettings { } /// Block pruning settings. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum BlocksPruning { - /// Keep full block history. - All, + /// Keep full block history, of every block that was ever imported. + KeepAll, + /// Keep full finalized block history. + KeepFinalized, /// Keep N recent finalized blocks. Some(u32), } @@ -1061,19 +1063,27 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) + Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage( + blocks_pruning: BlocksPruning, + canonicalization_delay: u64, + ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); + let state_pruning = match blocks_pruning { + BlocksPruning::KeepAll => PruningMode::ArchiveAll, + BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical, + BlocksPruning::Some(n) => PruningMode::blocks_pruning(n), + }; let db_setting = DatabaseSettings { trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), + state_pruning: Some(state_pruning), source: DatabaseSource::Custom { db, require_create_flag: true }, - blocks_pruning: BlocksPruning::Some(blocks_pruning), + blocks_pruning, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1707,32 +1717,47 @@ impl Backend { finalized: NumberFor, displaced: &FinalizationOutcome>, ) -> ClientResult<()> { - if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized >= keep.into() { - let number = finalized.saturating_sub(keep.into()); - self.prune_block(transaction, BlockId::::number(number))?; - } + match self.blocks_pruning { + BlocksPruning::KeepAll => {}, + BlocksPruning::Some(blocks_pruning) => { + // Always keep the last finalized block + let keep = std::cmp::max(blocks_pruning, 1); + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; + } + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + BlocksPruning::KeepFinalized => { + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + } + Ok(()) + } - // Also discard all blocks from displaced branches - for h in displaced.leaves() { - let mut number = finalized; - let mut hash = *h; - // Follow displaced chains back until we reach a finalized block. - // Since leaves are discarded due to finality, they can't have parents - // that are canonical, but not yet finalized. So we stop deleting as soon as - // we reach canonical chain. - while self.blockchain.hash(number)? != Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? 
{ - Some(header) => { - self.prune_block(transaction, id)?; - number = header.number().saturating_sub(One::one()); - hash = *header.parent_hash(); - }, - None => break, - } + fn prune_displaced_branches( + &self, + transaction: &mut Transaction, + finalized: NumberFor, + displaced: &FinalizationOutcome>, + ) -> ClientResult<()> { + // Discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = *h; + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deleting as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash) { + let id = BlockId::::hash(hash); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = *header.parent_hash(); + }, + None => break, } } } @@ -1752,6 +1777,13 @@ impl Backend { columns::BODY, id, )?; + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::JUSTIFICATIONS, + id, + )?; if let Some(index) = read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? { @@ -2506,7 +2538,7 @@ pub(crate) mod tests { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, }, 0, ) @@ -3176,7 +3208,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(2, 0); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3210,9 +3242,114 @@ pub(crate) mod tests { assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } + #[test] + fn prune_blocks_on_finalize_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + for i in 1..3 { + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + } + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + + #[test] + fn prune_blocks_on_finalize_with_fork_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; 
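// A hedged sketch of selecting the renamed pruning modes when assembling
// DatabaseSettings (field values are illustrative only; `All` became `KeepAll`
// and the new CLI default is `KeepFinalized`):
//
//     let settings = DatabaseSettings {
//         trie_cache_maximum_size: Some(16 * 1024 * 1024),
//         state_pruning: Some(PruningMode::ArchiveCanonical),
//         source: DatabaseSource::ParityDb { path },
//         blocks_pruning: BlocksPruning::KeepFinalized, // finalized history only
//     };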
+ } + + // insert a fork at block 2 + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![2.into()], + None, + ) + .unwrap(); + insert_block( + &backend, + 3, + fork_hash_root, + None, + H256::random(), + vec![3.into(), 11.into()], + None, + ) + .unwrap(); + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + + for i in 1..5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + assert_eq!(bc.info().best_number, 4); + for i in 0..5 { + assert!(bc.hash(i).unwrap().is_some()); + } + } + #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3273,7 +3410,7 @@ pub(crate) mod tests { #[test] fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3315,7 +3452,7 @@ pub(crate) mod tests { #[test] fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3350,7 +3487,7 @@ pub(crate) mod tests { #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); @@ -3397,7 +3534,7 @@ pub(crate) mod tests { #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..2 { diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2ab1415f8ca31..e0f47110d9046 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() { DatabaseSettings { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, 
source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::blocks_pruning(1)), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 11c1cbaf7afb1..23245d46cba10 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -237,7 +237,7 @@ fn node_config< database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index be4549c9957c0..d3e71f0ad28d6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -26,7 +26,7 @@ pub use sc_client_api::{ execution_extensions::{ExecutionExtensions, ExecutionStrategies}, BadBlocks, ForkBlocks, }; -pub use sc_client_db::{self, Backend}; +pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; @@ -102,7 +102,8 @@ impl /// Create new `TestClientBuilder` with default backend and storage chain mode pub fn with_tx_storage(blocks_pruning: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0)); + let backend = + Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0)); Self::with_backend(backend) } } From 0dbeaa0e98e78a848046a815f979bfce67f98c6d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:08:34 +0100 Subject: [PATCH 07/75] re-export weight file for fast-unstsake pallet (#12352) --- frame/fast-unstake/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 5acc9940debf1..9bfb29f8457fa 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -90,7 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - use weights::WeightInfo; + pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] #[codec(mel_bound(T: Config))] From f360c87073cb5f59b7b55cfbfef5d91a40d1a217 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 26 Sep 2022 15:10:09 +0300 Subject: [PATCH 08/75] Move transactions protocol to its own crate (#12264) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move transaction protocol to its own crate * Update Cargo.lock * Fix binaries * Update client/network/transactions/src/lib.rs Co-authored-by: Dmitry Markin * Update client/service/src/builder.rs Co-authored-by: Bastian Köcher * Apply review comments * Revert one change and apply cargo-fmt * Remove Transaction from Message * Add array-bytes * trigger CI * Add comment about codec index Co-authored-by: Dmitry 
Markin Co-authored-by: Bastian Köcher --- Cargo.lock | 24 ++ bin/node-template/node/src/service.rs | 3 +- bin/node/cli/src/service.rs | 3 +- client/beefy/Cargo.toml | 1 + client/beefy/src/lib.rs | 4 +- client/cli/Cargo.toml | 1 + client/cli/src/params/network_params.rs | 5 +- client/finality-grandpa/src/lib.rs | 8 +- client/network/common/Cargo.toml | 3 + client/network/common/src/config.rs | 128 +++++++++++ client/network/{ => common}/src/error.rs | 3 +- client/network/common/src/lib.rs | 8 + client/network/common/src/service.rs | 29 --- client/network/{ => common}/src/utils.rs | 0 client/network/src/config.rs | 209 +----------------- client/network/src/discovery.rs | 3 +- client/network/src/lib.rs | 13 +- client/network/src/peer_info.rs | 2 +- client/network/src/protocol.rs | 13 +- client/network/src/protocol/message.rs | 10 +- client/network/src/service.rs | 77 ++----- client/network/src/service/tests.rs | 56 ++--- client/network/test/src/lib.rs | 14 +- client/network/transactions/Cargo.toml | 28 +++ client/network/transactions/src/config.rs | 98 ++++++++ .../src/lib.rs} | 108 ++++----- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 62 ++++-- client/service/src/config.rs | 6 +- client/service/src/error.rs | 3 +- client/service/src/lib.rs | 7 +- client/service/test/src/lib.rs | 7 +- 32 files changed, 466 insertions(+), 471 deletions(-) rename client/network/{ => common}/src/error.rs (96%) rename client/network/{ => common}/src/utils.rs (100%) create mode 100644 client/network/transactions/Cargo.toml create mode 100644 client/network/transactions/src/config.rs rename client/network/{src/transactions.rs => transactions/src/lib.rs} (84%) diff --git a/Cargo.lock b/Cargo.lock index 6ea79a120361e..a9a0eef551179 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -468,6 +468,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-network-gossip", "sc-network-test", "sc-utils", @@ -7934,6 +7935,7 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", + "sc-network-common", "sc-service", "sc-telemetry", "sc-tracing", @@ -8547,7 +8549,9 @@ dependencies = [ "bitflags", "bytes", "futures", + "futures-timer", "libp2p", + "linked_hash_set", "parity-scale-codec", "prost-build 0.10.4", "sc-consensus", @@ -8558,6 +8562,7 @@ dependencies = [ "sp-consensus", "sp-finality-grandpa", "sp-runtime", + "substrate-prometheus-endpoint", "thiserror", ] @@ -8663,6 +8668,24 @@ dependencies = [ "substrate-test-runtime-client", ] +[[package]] +name = "sc-network-transactions" +version = "0.10.0-dev" +dependencies = [ + "array-bytes", + "futures", + "hex", + "libp2p", + "log", + "parity-scale-codec", + "pin-project", + "sc-network-common", + "sc-peerset", + "sp-consensus", + "sp-runtime", + "substrate-prometheus-endpoint", +] + [[package]] name = "sc-offchain" version = "4.0.0-dev" @@ -8851,6 +8874,7 @@ dependencies = [ "sc-network-common", "sc-network-light", "sc-network-sync", + "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 6ec9a33749a69..96de6e17f3bfd 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -191,7 +191,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, 
client: client.clone(), @@ -238,6 +238,7 @@ pub fn new_full(mut config: Configuration) -> Result rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, + tx_handler_controller, config, telemetry: telemetry.as_mut(), })?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a3098eac6402f..6c29f0c08ee13 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -354,7 +354,7 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -392,6 +392,7 @@ pub fn new_full_base( transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, + tx_handler_controller, telemetry: telemetry.as_mut(), })?; diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 47a3be859cbbb..a125d4c8d4f07 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -27,6 +27,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 41eeec43d64bd..ad527b2929585 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -83,8 +83,8 @@ pub(crate) mod beefy_protocol_name { /// For standard protocol name see [`beefy_protocol_name::standard_name`]. 
pub fn beefy_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { - let mut cfg = sc_network::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); cfg.allow_non_reserved(25, 25); cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e5cd6167596c0..37a8fd2e0b64d 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -34,6 +34,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 74c2db92c3215..0450b5f0e2566 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -19,11 +19,10 @@ use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use clap::Args; use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig}, multiaddr::Protocol, }; +use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig}; use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 7e47b70bd6b98..d5c05fea78aa2 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -688,18 +688,18 @@ pub struct GrandpaParams { /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { +) -> sc_network_common::config::NonDefaultSetConfig { use communication::grandpa_protocol_name; - sc_network::config::NonDefaultSetConfig { + sc_network_common::config::NonDefaultSetConfig { notifications_protocol: protocol_name, fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. 
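// A hedged sketch: a reserved-only set like the one built field-by-field here
// can also start from the constructor moved into sc-network-common, which
// yields zero slots and NonReservedPeerMode::Deny by default:
//
//     let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(
//         protocol_name,
//         1024 * 1024, // max_notification_size, as below
//     );
//     cfg.add_fallback_names(legacy_names); // e.g. the LEGACY_NAMES above
//     // a gossip set accepting non-reserved peers (BEEFY above) would add:
//     // cfg.allow_non_reserved(25, 25);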
max_notification_size: 1024 * 1024, - set_config: sc_network::config::SetConfig { + set_config: sc_network_common::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + non_reserved_mode: sc_network_common::config::NonReservedPeerMode::Deny, }, } } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 47d43e8b4b03f..1ee7b15538366 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -24,7 +24,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +futures-timer = "3.0.2" libp2p = "0.46.1" +linked_hash_set = "0.1.3" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 8b7e045780d7d..fb23cd0174922 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,6 +18,8 @@ //! Configuration of the networking layer. +use crate::protocol; + use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; @@ -171,3 +173,129 @@ impl From for ParseErr { Self::MultiaddrParse(err) } } + +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. + pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. + pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + Self { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. +/// +/// > **Note**: As new fields might be added in the future, please consider using the `new` method +/// > and modifiers instead of creating this struct manually. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: protocol::ProtocolName, + /// If the remote reports that it doesn't support the protocol indicated in the + /// `notifications_protocol` field, then each of these fallback names will be tried one by + /// one. + /// + /// If a fallback is used, it will be reported in + /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` + pub fallback_names: Vec, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + +impl NonDefaultSetConfig { + /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. 
+ pub fn new(notifications_protocol: protocol::ProtocolName, max_notification_size: u64) -> Self { + Self { + notifications_protocol, + max_notification_size, + fallback_names: Vec::new(), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Modifies the configuration to allow non-reserved nodes. + pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { + self.set_config.in_peers = in_peers; + self.set_config.out_peers = out_peers; + self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; + } + + /// Add a node to the list of reserved nodes. + pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { + self.set_config.reserved_nodes.push(peer); + } + + /// Add a list of protocol names used for backward compatibility. + /// + /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. + pub fn add_fallback_names(&mut self, fallback_names: Vec) { + self.fallback_names.extend(fallback_names); + } +} + +/// Configuration for the transport layer. +#[derive(Clone, Debug)] +pub enum TransportConfig { + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in `::sc_network::config::NetworkConfiguration::boot_nodes`. + allow_private_ipv4: bool, + }, + + /// Only allow connections within the same process. + /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, +} + +/// The policy for connections to non-reserved peers. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum NonReservedPeerMode { + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, +} + +impl NonReservedPeerMode { + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(Self::Accept), + "deny" => Some(Self::Deny), + _ => None, + } + } +} diff --git a/client/network/src/error.rs b/client/network/common/src/error.rs similarity index 96% rename from client/network/src/error.rs rename to client/network/common/src/error.rs index b4287ffbd55db..4326b1af52836 100644 --- a/client/network/src/error.rs +++ b/client/network/common/src/error.rs @@ -18,9 +18,8 @@ //! Substrate network possible errors. -use crate::config::TransportConfig; +use crate::{config::TransportConfig, protocol::ProtocolName}; use libp2p::{Multiaddr, PeerId}; -use sc_network_common::protocol::ProtocolName; use std::fmt; diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 3a30d24900199..36e67f11e5cff 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -19,8 +19,16 @@ //! Common data structures of the networking layer. 
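// A minimal sketch of the blanket-impl marker-trait pattern that the `ExHashT`
// trait below relies on: declare an empty trait carrying the required bounds,
// implement it for every type satisfying them, and no type ever needs a manual
// implementation (names here are illustrative):
//
//     trait Marker: Clone + Send + Sync + 'static {}
//     impl<T: Clone + Send + Sync + 'static> Marker for T {}
//
//     fn track<H: Marker>(_hash: H) {} // accepts u64, H256, ... for free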
pub mod config; +pub mod error; pub mod message; pub mod protocol; pub mod request_responses; pub mod service; pub mod sync; +pub mod utils; + +/// Minimum Requirements for a Hash within Networking +pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} + +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +{} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 88583832e4c38..aa4967ba51700 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -604,35 +604,6 @@ where } } -/// Provides ability to propagate transactions over the network. -pub trait NetworkTransaction { - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - fn trigger_repropagate(&self); - - /// You must call when new transaction is imported by the transaction pool. - /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - fn propagate_transaction(&self, hash: H); -} - -impl NetworkTransaction for Arc -where - T: ?Sized, - T: NetworkTransaction, -{ - fn trigger_repropagate(&self) { - T::trigger_repropagate(self) - } - - fn propagate_transaction(&self, hash: H) { - T::propagate_transaction(self, hash) - } -} - /// Provides ability to announce blocks to the network. pub trait NetworkBlock { /// Make sure an important block is propagated to peers. diff --git a/client/network/src/utils.rs b/client/network/common/src/utils.rs similarity index 100% rename from client/network/src/utils.rs rename to client/network/common/src/utils.rs diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 202a628884d79..b2adfa81d065b 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -27,24 +27,24 @@ pub use sc_network_common::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, sync::warp::WarpSyncProvider, + ExHashT, }; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ExHashT; - use core::{fmt, iter}; -use futures::future; use libp2p::{ identity::{ed25519, Keypair}, multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName, sync::ChainSync}; +use sc_network_common::{ + config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, + sync::ChainSync, +}; use sp_runtime::traits::Block as BlockT; use std::{ - collections::HashMap, error::Error, fs, future::Future, @@ -52,16 +52,14 @@ use std::{ net::Ipv4Addr, path::{Path, PathBuf}, pin::Pin, - str, sync::Arc, }; use zeroize::Zeroize; /// Network initialization parameters. -pub struct Params +pub struct Params where B: BlockT + 'static, - H: ExHashT, { /// Assigned role for our node (full, light, ...). pub role: Role, @@ -70,21 +68,12 @@ where /// default. pub executor: Option + Send>>) + Send>>, - /// How to spawn the background task dedicated to the transactions handler. - pub transactions_handler_executor: Box + Send>>) + Send>, - /// Network layer configuration. pub network_config: NetworkConfiguration, /// Client that contains the blockchain. pub chain: Arc, - /// Pool of transactions. 
- /// - /// The network worker will fetch transactions from this object in order to propagate them on - /// the network. - pub transaction_pool: Arc>, - /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, @@ -166,66 +155,6 @@ impl fmt::Display for Role { } } -/// Result of the transaction import. -#[derive(Clone, Copy, Debug)] -pub enum TransactionImport { - /// Transaction is good but already known by the transaction pool. - KnownGood, - /// Transaction is good and not yet known. - NewGood, - /// Transaction is invalid. - Bad, - /// Transaction import was not performed. - None, -} - -/// Future resolving to transaction import result. -pub type TransactionImportFuture = Pin + Send>>; - -/// Transaction pool interface -pub trait TransactionPool: Send + Sync { - /// Get transactions from the pool that are ready to be propagated. - fn transactions(&self) -> Vec<(H, B::Extrinsic)>; - /// Get hash of transaction. - fn hash_of(&self, transaction: &B::Extrinsic) -> H; - /// Import a transaction into the pool. - /// - /// This will return future. - fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap>); - /// Get transaction by hash. - fn transaction(&self, hash: &H) -> Option; -} - -/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always -/// empty and discards all incoming transactions. -/// -/// Requires the "hash" type to implement the `Default` trait. -/// -/// Useful for testing purposes. -pub struct EmptyTransactionPool; - -impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &B::Extrinsic) -> H { - Default::default() - } - - fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { - Box::pin(future::ready(TransactionImport::KnownGood)) - } - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &H) -> Option { - None - } -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { @@ -394,132 +323,6 @@ impl NetworkConfiguration { } } -/// Configuration for a set of nodes. -#[derive(Clone, Debug)] -pub struct SetConfig { - /// Maximum allowed number of incoming substreams related to this set. - pub in_peers: u32, - /// Number of outgoing substreams related to this set that we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically - /// refused. - pub non_reserved_mode: NonReservedPeerMode, -} - -impl Default for SetConfig { - fn default() -> Self { - Self { - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - } - } -} - -/// Extension to [`SetConfig`] for sets that aren't the default set. -/// -/// > **Note**: As new fields might be added in the future, please consider using the `new` method -/// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] -pub struct NonDefaultSetConfig { - /// Name of the notifications protocols of this set. A substream on this set will be - /// considered established once this protocol is open. - /// - /// > **Note**: This field isn't present for the default set, as this is handled internally - /// > by the networking code. 
- pub notifications_protocol: ProtocolName, - /// If the remote reports that it doesn't support the protocol indicated in the - /// `notifications_protocol` field, then each of these fallback names will be tried one by - /// one. - /// - /// If a fallback is used, it will be reported in - /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - pub fallback_names: Vec, - /// Maximum allowed size of single notifications. - pub max_notification_size: u64, - /// Base configuration. - pub set_config: SetConfig, -} - -impl NonDefaultSetConfig { - /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - /// Modifies the configuration to allow non-reserved nodes. - pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { - self.set_config.in_peers = in_peers; - self.set_config.out_peers = out_peers; - self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; - } - - /// Add a node to the list of reserved nodes. - pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { - self.set_config.reserved_nodes.push(peer); - } - - /// Add a list of protocol names used for backward compatibility. - /// - /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. - pub fn add_fallback_names(&mut self, fallback_names: Vec) { - self.fallback_names.extend(fallback_names); - } -} - -/// Configuration for the transport layer. -#[derive(Clone, Debug)] -pub enum TransportConfig { - /// Normal transport mode. - Normal { - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. - enable_mdns: bool, - - /// If true, allow connecting to private IPv4 addresses (as defined in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have - /// been passed in [`NetworkConfiguration::boot_nodes`]. - allow_private_ipv4: bool, - }, - - /// Only allow connections within the same process. - /// Only addresses of the form `/memory/...` will be supported. - MemoryOnly, -} - -/// The policy for connections to non-reserved peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, -} - -impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(Self::Accept), - "deny" => Some(Self::Deny), - _ => None, - } - } -} - /// The configuration of a node's secret key, describing the type of key /// and how it is obtained. A node's identity keypair is the result of /// the evaluation of the node key configuration. diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab93662968dc2..8422e34485125 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,7 +46,6 @@ //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. 
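// A hedged sketch of the LRU-bounded set that this file now imports from
// sc_network_common::utils after the move (behaviour as used elsewhere in this
// patch series; treat the exact signatures as assumptions):
//
//     use std::num::NonZeroUsize;
//     let mut known = LruHashSet::<u32>::new(NonZeroUsize::new(2).expect("2 > 0"));
//     assert!(known.insert(1));  // newly inserted -> true
//     assert!(!known.insert(1)); // already present -> false
//     known.insert(2);
//     known.insert(3); // capacity 2: the least-recently-used entry (1) is evicted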
-use crate::utils::LruHashSet; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -72,7 +71,7 @@ use libp2p::{ }, }; use log::{debug, error, info, trace, warn}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{config::ProtocolId, utils::LruHashSet}; use sp_core::hexdisplay::HexDisplay; use std::{ cmp, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 320104d0f9554..d17f47328b804 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -251,12 +251,9 @@ mod protocol; mod request_responses; mod service; mod transport; -mod utils; pub mod config; -pub mod error; pub mod network_state; -pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; @@ -269,8 +266,8 @@ pub use sc_network_common::{ request_responses::{IfDisconnected, RequestFailure}, service::{ KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, - NetworkTransaction, Signature, SigningError, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, Signature, + SigningError, }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, @@ -295,9 +292,3 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; - -/// Minimum Requirements for a Hash within Networking -pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static -{} diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index d668cb25ea455..c62c2ea1c5d98 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -16,7 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ @@ -33,6 +32,7 @@ use libp2p::{ Multiaddr, }; use log::{debug, error, trace}; +use sc_network_common::utils::interval; use smallvec::SmallVec; use std::{ collections::hash_map::Entry, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c7a3cf4b2160f..fbf651de9d49a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -16,10 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
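// A small sketch of the NonReservedPeerMode parser now provided by
// sc_network_common::config (variants and `parse` appear verbatim in the
// config.rs hunk earlier in this patch; the hunk below compares against
// NonReservedPeerMode::Deny when computing `reserved_only`):
//
//     assert_eq!(NonReservedPeerMode::parse("accept"), Some(NonReservedPeerMode::Accept));
//     assert_eq!(NonReservedPeerMode::parse("deny"), Some(NonReservedPeerMode::Deny));
//     assert_eq!(NonReservedPeerMode::parse("anything-else"), None);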
-use crate::{ - config, error, - utils::{interval, LruHashSet}, -}; +use crate::config; use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; @@ -45,7 +42,8 @@ use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, }; use sc_network_common::{ - config::ProtocolId, + config::{NonReservedPeerMode, ProtocolId}, + error, protocol::ProtocolName, request_responses::RequestFailure, sync::{ @@ -57,6 +55,7 @@ use sc_network_common::{ OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation, SyncStatus, }, + utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::BlockOrigin; @@ -341,7 +340,7 @@ where bootnodes, reserved_nodes: default_sets_reserved.clone(), reserved_only: network_config.default_peers_set.non_reserved_mode == - config::NonReservedPeerMode::Deny, + NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -352,7 +351,7 @@ where } let reserved_only = - set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; + set_cfg.set_config.non_reserved_mode == NonReservedPeerMode::Deny; sets.push(sc_peerset::SetConfig { in_peers: set_cfg.set_config.in_peers, diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 50c4a264a5f95..3e1281753b82c 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -36,9 +36,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// A set of transactions. -pub type Transactions = Vec; - /// Remote call response. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct RemoteCallResponse { @@ -59,7 +56,7 @@ pub struct RemoteReadResponse { /// Generic types. pub mod generic { - use super::{RemoteCallResponse, RemoteReadResponse, Transactions}; + use super::{RemoteCallResponse, RemoteReadResponse}; use bitflags::bitflags; use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; @@ -146,9 +143,10 @@ pub mod generic { BlockResponse(BlockResponse), /// Block announce. BlockAnnounce(BlockAnnounce
), - /// Transactions. - Transactions(Transactions), /// Consensus protocol message. + // NOTE: index is incremented by 1 due to transaction-related + // message that was removed + #[codec(index = 6)] Consensus(ConsensusMessage), /// Remote method call request. RemoteCallRequest(RemoteCallRequest), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index dceb57d9e695c..180482e75ece2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,9 +29,8 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - config::{Params, TransportConfig}, + config::Params, discovery::DiscoveryConfig, - error::Error, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, @@ -39,7 +38,7 @@ use crate::{ self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready, }, - transactions, transport, ExHashT, ReputationChange, + transport, ReputationChange, }; use codec::Encode as _; @@ -60,7 +59,8 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, + error::Error, protocol::{ event::{DhtEvent, Event}, ProtocolName, @@ -73,6 +73,7 @@ use sc_network_common::{ NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, sync::{SyncState, SyncStatus}, + ExHashT, }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -101,7 +102,7 @@ mod out_events; mod tests; pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; -use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; +use sc_network_common::service::{NetworkBlock, NetworkRequest}; /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { @@ -121,7 +122,7 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: TracingUnboundedSender>, + to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, @@ -144,7 +145,7 @@ where /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. 
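// A hedged illustration of why the message.rs hunk above pins
// `#[codec(index = 6)]` on `Consensus`: SCALE numbers enum variants by
// position, so removing the `Transactions` variant would otherwise shift the
// wire index of every later variant (the enum below is illustrative):
//
//     #[derive(codec::Encode, codec::Decode)]
//     enum Msg {
//         A, // implicit index 0
//         // B removed here; without an override, C would re-number to 1
//         #[codec(index = 2)]
//         C, // still encodes as 2, keeping old peers wire-compatible
//     }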
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -215,21 +216,6 @@ where fs::create_dir_all(path)?; } - let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( - params.protocol_id.clone(), - params - .chain - .hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - params.fork_id.clone(), - ); - params - .network_config - .extra_sets - .insert(0, transactions_handler_proto.set_config()); - info!( target: "sub-libp2p", "🏷 Local node identity is: {}", @@ -244,11 +230,8 @@ where params.protocol_id.clone(), ¶ms.fork_id, ¶ms.network_config, - iter::once(Vec::new()) - .chain( - (0..params.network_config.extra_sets.len() - 1) - .map(|_| default_notif_handshake_message.clone()), - ) + (0..params.network_config.extra_sets.len()) + .map(|_| default_notif_handshake_message.clone()) .collect(), params.metrics_registry.as_ref(), params.chain_sync, @@ -465,13 +448,6 @@ where _marker: PhantomData, }); - let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( - service.clone(), - params.transaction_pool, - params.metrics_registry.as_ref(), - )?; - (params.transactions_handler_executor)(tx_handler.run().boxed()); - Ok(NetworkWorker { external_addresses, num_connected, @@ -482,9 +458,9 @@ where from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, - tx_handler_controller, metrics, boot_node_ids, + _marker: Default::default(), }) } @@ -1149,20 +1125,6 @@ where } } -impl NetworkTransaction for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } -} - impl NetworkBlock> for NetworkService where B: BlockT + 'static, @@ -1249,9 +1211,7 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. -enum ServiceToWorkerMsg { - PropagateTransaction(H), - PropagateTransactions, +enum ServiceToWorkerMsg { RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), @@ -1309,7 +1269,7 @@ where /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. - from_service: TracingUnboundedReceiver>, + from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1319,8 +1279,9 @@ where /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc>>, - /// Controller for the handler of incoming and outgoing transactions. - tx_handler_controller: transactions::TransactionsHandlerController, + /// Marker to pin the `H` generic. Serves no purpose except to not break backwards + /// compatibility. 
+ _marker: PhantomData, } impl Future for NetworkWorker @@ -1376,10 +1337,6 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), - ServiceToWorkerMsg::PropagateTransaction(hash) => - this.tx_handler_controller.propagate_transaction(hash), - ServiceToWorkerMsg::PropagateTransactions => - this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1922,8 +1879,6 @@ where SyncState::Downloading => true, }; - this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); - this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index a9505c5341c3d..c8f137f79c6dc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -21,7 +21,7 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, protocol::event::Event, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, }; @@ -135,12 +135,8 @@ fn build_test_full_node( let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(config::EmptyTransactionPool), protocol_id, fork_id, import_queue, @@ -178,23 +174,23 @@ fn build_nodes_one_proto() -> ( let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -203,7 +199,7 @@ fn build_nodes_one_proto() -> ( }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -368,13 +364,13 @@ fn lots_of_incoming_peers_works() { let (main_node, _) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, }], - transport: 
config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -387,11 +383,11 @@ fn lots_of_incoming_peers_works() { for _ in 0..32 { let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), peer_id: main_node_peer_id, @@ -399,7 +395,7 @@ fn lots_of_incoming_peers_works() { ..Default::default() }, }], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -504,23 +500,23 @@ fn fallback_name_working() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: NEW_PROTOCOL_NAME.into(), fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -529,7 +525,7 @@ fn fallback_name_working() { }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -572,7 +568,7 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -599,7 +595,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, boot_nodes: vec![boot_node], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); @@ -632,11 +628,8 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + transport: TransportConfig::MemoryOnly, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", 
Default::default(), None) }); } @@ -652,10 +645,7 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -668,7 +658,7 @@ fn ensure_public_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, public_addresses: vec![public_address], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index e78b91a4e04ee..2f6b788e368b3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -47,16 +47,14 @@ use sc_consensus::{ ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, Verifier, }; -pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - config::{ - NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, SyncMode, - TransportConfig, - }, + config::{NetworkConfiguration, Role, SyncMode}, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, + }, protocol::ProtocolName, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, @@ -879,12 +877,8 @@ where let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, fork_id, import_queue, diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml new file mode 100644 index 0000000000000..5578bb2c7191e --- /dev/null +++ b/client/network/transactions/Cargo.toml @@ -0,0 +1,28 @@ +[package] +description = "Substrate transaction protocol" +name = "sc-network-transactions" +version = "0.10.0-dev" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2021" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-transactions" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +array-bytes = "4.1" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +hex = "0.4.0" +libp2p = "0.46.1" +log = "0.4.17" +pin-project = "1.0.10" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sp-runtime = { version = "6.0.0", path = 
"../../../primitives/runtime" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/config.rs b/client/network/transactions/src/config.rs new file mode 100644 index 0000000000000..abb8cccd301ac --- /dev/null +++ b/client/network/transactions/src/config.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Configuration of the transaction protocol + +use futures::prelude::*; +use sc_network_common::ExHashT; +use sp_runtime::traits::Block as BlockT; +use std::{collections::HashMap, future::Future, pin::Pin, time}; + +/// Interval at which we propagate transactions; +pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +pub(crate) const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification. +pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation request we keep at any moment. +pub(crate) const MAX_PENDING_TRANSACTIONS: usize = 8192; + +/// Result of the transaction import. +#[derive(Clone, Copy, Debug)] +pub enum TransactionImport { + /// Transaction is good but already known by the transaction pool. + KnownGood, + /// Transaction is good and not yet known. + NewGood, + /// Transaction is invalid. + Bad, + /// Transaction import was not performed. + None, +} + +/// Future resolving to transaction import result. +pub type TransactionImportFuture = Pin + Send>>; + +/// Transaction pool interface +pub trait TransactionPool: Send + Sync { + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// This will return future. + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; +} + +/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always +/// empty and discards all incoming transactions. +/// +/// Requires the "hash" type to implement the `Default` trait. +/// +/// Useful for testing purposes. 
+pub struct EmptyTransactionPool; + +impl TransactionPool for EmptyTransactionPool { + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } + + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { + Box::pin(future::ready(TransactionImport::KnownGood)) + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { + None + } +} diff --git a/client/network/src/transactions.rs b/client/network/transactions/src/lib.rs similarity index 84% rename from client/network/src/transactions.rs rename to client/network/transactions/src/lib.rs index da4547aefeab3..b75bd411b39c4 100644 --- a/client/network/src/transactions.rs +++ b/client/network/transactions/src/lib.rs @@ -26,27 +26,22 @@ //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. -use crate::{ - config::{self, TransactionImport, TransactionImportFuture, TransactionPool}, - error, - protocol::message, - service::NetworkService, - utils::{interval, LruHashSet}, - ExHashT, -}; - +use crate::config::*; use codec::{Decode, Encode}; use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ - config::ProtocolId, + config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, + error, protocol::{ event::{Event, ObservedRole}, ProtocolName, }, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, + utils::{interval, LruHashSet}, + ExHashT, }; use sp_runtime::traits::Block as BlockT; use std::{ @@ -54,27 +49,14 @@ use std::{ iter, num::NonZeroUsize, pin::Pin, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + sync::Arc, task::Poll, - time, }; -/// Interval at which we propagate transactions; -const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); - -/// Maximum number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. +pub mod config; -/// Maximum allowed size for a transactions notification. -const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; - -/// Maximum number of transaction validation request we keep at any moment. -const MAX_PENDING_TRANSACTIONS: usize = 8192; +/// A set of transactions. +pub type Transactions = Vec; mod rep { use sc_peerset::ReputationChange as Rep; @@ -141,7 +123,7 @@ impl TransactionsHandlerPrototype { pub fn new>( protocol_id: ProtocolId, genesis_hash: Hash, - fork_id: Option, + fork_id: Option<&str>, ) -> Self { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { @@ -158,16 +140,16 @@ impl TransactionsHandlerPrototype { } /// Returns the configuration of the set to put in the network configuration. 
- pub fn set_config(&self) -> config::NonDefaultSetConfig { - config::NonDefaultSetConfig { + pub fn set_config(&self) -> NonDefaultSetConfig { + NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, - set_config: config::SetConfig { + set_config: SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: config::NonReservedPeerMode::Deny, + non_reserved_mode: NonReservedPeerMode::Deny, }, } } @@ -176,23 +158,25 @@ impl TransactionsHandlerPrototype { /// the behaviour of the handler while it's running. /// /// Important: the transactions handler is initially disabled and doesn't gossip transactions. - /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it. - pub fn build( + /// Gossiping is enabled when major syncing is done. + pub fn build< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + >( self, - service: Arc>, + service: S, transaction_pool: Arc>, metrics_registry: Option<&Registry>, - ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); - let gossip_enabled = Arc::new(AtomicBool::new(false)); let handler = TransactionsHandler { protocol_name: self.protocol_name, propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), - gossip_enabled: gossip_enabled.clone(), service, event_stream, peers: HashMap::new(), @@ -205,7 +189,7 @@ impl TransactionsHandlerPrototype { }, }; - let controller = TransactionsHandlerController { to_handler, gossip_enabled }; + let controller = TransactionsHandlerController { to_handler }; Ok((handler, controller)) } @@ -214,15 +198,9 @@ impl TransactionsHandlerPrototype { /// Controls the behaviour of a [`TransactionsHandler`] it is connected to. pub struct TransactionsHandlerController { to_handler: mpsc::UnboundedSender>, - gossip_enabled: Arc, } impl TransactionsHandlerController { - /// Controls whether transactions are being gossiped on the network. - pub fn set_gossip_enabled(&mut self, enabled: bool) { - self.gossip_enabled.store(enabled, Ordering::Relaxed); - } - /// You may call this when new transactions are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at @@ -246,7 +224,11 @@ enum ToHandler { } /// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. -pub struct TransactionsHandler { +pub struct TransactionsHandler< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +> { protocol_name: ProtocolName, /// Interval at which we call `propagate_transactions`. propagate_timeout: Pin + Send>>, @@ -258,13 +240,12 @@ pub struct TransactionsHandler { /// multiple times concurrently. pending_transactions_peers: HashMap>, /// Network service to use to send messages and manage peers. - service: Arc>, + service: S, /// Stream of networking events. event_stream: Pin + Send>>, // All connected peers peers: HashMap>, transaction_pool: Arc>, - gossip_enabled: Arc, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. 
metrics: Option, @@ -278,7 +259,12 @@ struct Peer { role: ObservedRole, } -impl TransactionsHandler { +impl TransactionsHandler +where + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +{ /// Turns the [`TransactionsHandler`] into a future that should run forever and not be /// interrupted. pub async fn run(mut self) { @@ -360,9 +346,9 @@ impl TransactionsHandler { continue } - if let Ok(m) = as Decode>::decode( - &mut message.as_ref(), - ) { + if let Ok(m) = + as Decode>::decode(&mut message.as_ref()) + { self.on_transactions(remote, m); } else { warn!(target: "sub-libp2p", "Failed to decode transactions list"); @@ -376,10 +362,10 @@ impl TransactionsHandler { } /// Called when peer sends us new transactions - fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { - trace!(target: "sync", "{} Ignoring transactions while disabled", who); + fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { + trace!(target: "sync", "{} Ignoring transactions while major syncing", who); return } @@ -428,10 +414,11 @@ impl TransactionsHandler { /// Propagate one transaction. pub fn propagate_transaction(&mut self, hash: &H) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transaction [{:?}]", hash); if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); @@ -479,10 +466,11 @@ impl TransactionsHandler { /// Call when we must propagate ready transactions to peers. 
fn propagate_transactions(&mut self) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transactions"); let transactions = self.transaction_pool.transactions(); let propagated_to = self.do_propagate_transactions(&transactions); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3c574ef13c8e6..e46c65cf018f5 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -56,6 +56,7 @@ sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } +sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5a2f4cf978b41..dfd532a14c172 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -40,7 +40,7 @@ use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ - service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, + service::{NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -326,7 +326,6 @@ where pub trait SpawnTaskNetwork: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -339,7 +338,6 @@ where Block: BlockT, T: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -368,6 +366,9 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, + /// Controller for transactions handlers + pub tx_handler_controller: + sc_network_transactions::TransactionsHandlerController<::Hash>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, } @@ -446,6 +447,7 @@ where rpc_builder, network, system_rpc_tx, + tx_handler_controller, telemetry, } = params; @@ -481,7 +483,11 @@ where spawn_handle.spawn( "on-transaction-imported", Some("transaction-pool"), - transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), + transaction_notifications( + transaction_pool.clone(), + tx_handler_controller, + telemetry.clone(), + ), ); // Prometheus metrics. 
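A note on the handler rework above: the `gossip_enabled: Arc<AtomicBool>` flag is gone, and every entry point now asks the network's sync oracle directly. A minimal sketch of the gate, assuming only `S: sp_consensus::SyncOracle` as in the new `TransactionsHandler` bounds (`maybe_propagate` is a hypothetical stand-in for the three guarded methods, not part of the patch):

    fn maybe_propagate<S: sp_consensus::SyncOracle>(service: &mut S, ready: usize) {
        // While the node is major-syncing, transactions are neither accepted
        // from peers nor gossiped out; previously the network worker had to
        // toggle a shared boolean to get the same behaviour.
        if service.is_major_syncing() {
            return
        }
        log::debug!(target: "sync", "Propagating {} ready transactions", ready);
        // ...hand the ready transactions to `do_propagate_transactions` here...
    }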
@@ -544,20 +550,21 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( +async fn transaction_notifications( transaction_pool: Arc, - network: Network, + tx_handler_controller: sc_network_transactions::TransactionsHandlerController< + ::Hash, + >, telemetry: Option, ) where Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, - Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool .import_notification_stream() .for_each(move |hash| { - network.propagate_transaction(hash); + tx_handler_controller.propagate_transaction(hash); let status = transaction_pool.status(); telemetry!( telemetry; @@ -719,6 +726,7 @@ pub fn build_network( ( Arc::Hash>>, TracingUnboundedSender>, + sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, ), Error, @@ -761,9 +769,6 @@ where } } - let transaction_pool_adapter = - Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); - let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { @@ -845,7 +850,7 @@ where protocol_config })); - let network_params = sc_network::config::Params { + let mut network_params = sc_network::config::Params { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -853,16 +858,9 @@ where spawn_handle.spawn("libp2p-node", Some("networking"), fut); })) }, - transactions_handler_executor: { - let spawn_handle = Clone::clone(&spawn_handle); - Box::new(move |fut| { - spawn_handle.spawn("network-transactions-handler", Some("networking"), fut); - }) - }, network_config: config.network.clone(), chain: client.clone(), - transaction_pool: transaction_pool_adapter as _, - protocol_id, + protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), @@ -877,10 +875,32 @@ where .collect::>(), }; + // crate transactions protocol and add it to the list of supported protocols of `network_params` + let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + network_params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + network.clone(), + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), + config.prometheus_config.as_ref().map(|config| &config.registry), + )?; + spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( @@ -928,7 +948,7 @@ where future.await }); - Ok((network, system_rpc_tx, NetworkStarter(network_start_tx))) + Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx))) } /// Object used to start the network. 
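With the `build_network` rework above, embedders receive a `TransactionsHandlerController` instead of wiring the transactions protocol themselves, and the loop spawned by `spawn_tasks` reduces to the pattern below. This is a self-contained sketch with the pool's import stream and the controller's `propagate_transaction` abstracted as a plain stream and callback, since the concrete service types are elided here (`forward_imports` is a hypothetical mirror of `transaction_notifications`):

    use futures::StreamExt;

    // Every hash the transaction pool reports as imported is handed to the
    // dedicated transactions protocol rather than to `NetworkService`.
    async fn forward_imports<H>(imports: impl futures::Stream<Item = H>, propagate: impl Fn(H)) {
        imports
            .for_each(|hash| {
                propagate(hash);
                futures::future::ready(())
            })
            .await;
    }

    // Usage with stubbed inputs:
    // futures::executor::block_on(forward_imports(
    //     futures::stream::iter([1u64, 2, 3]),
    //     |h| println!("propagate {h}"),
    // ));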
diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 44153e3b914f3..bca0697bcbd08 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -24,13 +24,11 @@ pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig, Role}, Multiaddr, }; pub use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 0d702c7f37b98..001a83922d776 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -19,7 +19,6 @@ //! Errors that can occur during the service operation. use sc_keystore; -use sc_network; use sp_blockchain; use sp_consensus; @@ -41,7 +40,7 @@ pub enum Error { Consensus(#[from] sp_consensus::Error), #[error(transparent)] - Network(#[from] sc_network::error::Error), + Network(#[from] sc_network_common::error::Error), #[error(transparent)] Keystore(#[from] sc_keystore::Error), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 19358c1e5bc4c..091b4bbe9fe5f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -72,7 +72,7 @@ pub use sc_chain_spec::{ pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use sc_network::config::{TransactionImport, TransactionImportFuture}; +pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; @@ -148,7 +148,7 @@ async fn build_network_future< + Send + Sync + 'static, - H: sc_network::ExHashT, + H: sc_network_common::ExHashT, >( role: Role, mut network: sc_network::NetworkWorker, @@ -415,7 +415,8 @@ where .collect() } -impl sc_network::config::TransactionPool for TransactionPoolAdapter +impl sc_network_transactions::config::TransactionPool + for TransactionPoolAdapter where C: HeaderBackend + BlockBackend diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 23245d46cba10..5d29d34a3cbf2 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -22,12 +22,9 @@ use futures::{task::Poll, Future, TryFutureExt as _}; use log::{debug, info}; use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; -use sc_network::{ - config::{NetworkConfiguration, TransportConfig}, - multiaddr, -}; +use sc_network::{config::NetworkConfiguration, multiaddr}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; use sc_service::{ From 519fbaae886e2773b37363970433d36cbb47d853 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 27 Sep 2022 08:08:14 +0100 Subject: [PATCH 09/75] =?UTF-8?q?export=20more=20types=20from=20fast-untsa?= =?UTF-8?q?ke=20=F0=9F=A4=A6=E2=80=8D=E2=99=82=EF=B8=8F=20(#12353)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * export more types from fast-untsake 🤦‍♂️ * make non-test * fmt --- 
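In short: `mod types` becomes `pub mod types`, `PreventStakingOpsIfUnbonding` is re-exported from the pallet, and its constructor loses the `#[cfg(test)]` gate, so downstream crates can construct the guard themselves. A minimal sketch, assuming a `T: pallet_fast_unstake::Config` satisfies the struct's bounds (`make_filter` is a hypothetical helper, not part of the patch):

    use pallet_fast_unstake::types::PreventStakingOpsIfUnbonding;

    fn make_filter<T: pallet_fast_unstake::Config>() -> PreventStakingOpsIfUnbonding<T> {
        // `new()` is now compiled unconditionally, not only under `cfg(test)`.
        PreventStakingOpsIfUnbonding::new()
    }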
frame/fast-unstake/src/lib.rs | 3 ++- frame/fast-unstake/src/types.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 9bfb29f8457fa..7fbac8560ea6c 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -60,7 +60,7 @@ mod tests; // NOTE: enable benchmarking in tests as well. #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -mod types; +pub mod types; pub mod weights; pub const LOG_TARGET: &'static str = "runtime::fast-unstake"; @@ -90,6 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; + pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index e8d538dce4802..2ddb8dca27e9e 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -47,7 +47,6 @@ pub struct UnstakeRequest(sp_std::marker::PhantomData); -#[cfg(test)] impl PreventStakingOpsIfUnbonding { pub fn new() -> Self { Self(Default::default()) From 1763ff2273c4649fa969167503951371141a0272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 27 Sep 2022 11:37:45 +0200 Subject: [PATCH 10/75] Fix compilation on 1.66 nightly (#12363) --- primitives/state-machine/src/trie_backend_essence.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index dda7b51ab08c6..cd2a71163e2ee 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -178,7 +178,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> R { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let mut cache = self .trie_node_cache @@ -216,7 +219,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> (Option, R), ) -> R { let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); From edca89177318580878bf11c0d49586ceea23909b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 27 Sep 2022 13:16:30 +0200 Subject: [PATCH 11/75] Relax Slots-based engines from Epochs (#12360) Remove Epochs reference from slots subsystem --- client/consensus/aura/src/lib.rs | 14 ++++++------- client/consensus/babe/src/lib.rs | 12 ++++------- client/consensus/slots/src/lib.rs | 35 +++++++++++++++---------------- 3 files changed, 28 insertions(+), 33 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index c538200bb315c..a0eed6e35310e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -257,7 +257,7 @@ pub fn build_aura_worker( SyncOracle = SO, JustificationSyncLink = L, Claim = P::Public, - EpochData = Vec>, + AuxData = Vec>, > 
where B: BlockT, @@ -330,7 +330,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; - type EpochData = Vec>; + type AuxData = Vec>; fn logging_target(&self) -> &'static str { "aura" @@ -340,15 +340,15 @@ where &mut self.block_import } - fn epoch_data( + fn aux_data( &self, header: &B::Header, _slot: Slot, - ) -> Result { + ) -> Result { authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { Some(epoch_data.len()) } @@ -356,7 +356,7 @@ where &self, _header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + epoch_data: &Self::AuxData, ) -> Option { let expected_author = slot_author::
<P>
(slot, epoch_data); expected_author.and_then(|p| { @@ -382,7 +382,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - _epoch: Self::EpochData, + _epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index aef4785b7bb81..109e5aade02a7 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -729,7 +729,6 @@ where BS: BackoffAuthoringBlocksStrategy> + Sync, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; @@ -737,6 +736,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { "babe" @@ -746,11 +746,7 @@ where &mut self.block_import } - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { self.epoch_changes .shared_data() .epoch_descriptor_for_child_of( @@ -763,7 +759,7 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) @@ -823,7 +819,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, + epoch_descriptor: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7c5d5d4a73bc1..6225bbbda1745 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -101,8 +101,8 @@ pub trait SimpleSlotWorker { /// Data associated with a slot claim. type Claim: Send + Sync + 'static; - /// Epoch data necessary for authoring. - type EpochData: Send + Sync + 'static; + /// Auxiliary data necessary for authoring. + type AuxData: Send + Sync + 'static; /// The logging target to use when logging messages. fn logging_target(&self) -> &'static str; @@ -110,29 +110,28 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. fn block_import(&mut self) -> &mut Self::BlockImport; - /// Returns the epoch data necessary for authoring. For time-dependent epochs, - /// use the provided slot number as a canonical source of time. - fn epoch_data( + /// Returns the auxiliary data necessary for authoring. + fn aux_data( &self, header: &B::Header, slot: Slot, - ) -> Result; + ) -> Result; - /// Returns the number of authorities given the epoch data. + /// Returns the number of authorities. /// None indicate that the authorities information is incomplete. - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; + fn authorities_len(&self, aux_data: &Self::AuxData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. async fn claim_slot( &self, header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + aux_data: &Self::AuxData, ) -> Option; /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. 
- fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {} /// Return the pre digest data to include in a block authored with the given claim. fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; @@ -145,7 +144,7 @@ pub trait SimpleSlotWorker { body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - epoch: Self::EpochData, + epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -268,12 +267,12 @@ pub trait SimpleSlotWorker { Delay::new(proposing_remaining_duration) }; - let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { - Ok(epoch_data) => epoch_data, + let aux_data = match self.aux_data(&slot_info.chain_head, slot) { + Ok(aux_data) => aux_data, Err(err) => { warn!( target: logging_target, - "Unable to fetch epoch data at block {:?}: {}", + "Unable to fetch auxiliary data for block {:?}: {}", slot_info.chain_head.hash(), err, ); @@ -290,9 +289,9 @@ pub trait SimpleSlotWorker { }, }; - self.notify_slot(&slot_info.chain_head, slot, &epoch_data); + self.notify_slot(&slot_info.chain_head, slot, &aux_data); - let authorities_len = self.authorities_len(&epoch_data); + let authorities_len = self.authorities_len(&aux_data); if !self.force_authoring() && self.sync_oracle().is_offline() && @@ -309,7 +308,7 @@ pub trait SimpleSlotWorker { return None } - let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data).await?; + let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { return None @@ -351,7 +350,7 @@ pub trait SimpleSlotWorker { body.clone(), proposal.storage_changes, claim, - epoch_data, + aux_data, ) .await { From 2a6c314cdce2b7813fbe2af2d21388ff5ededcbe Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 27 Sep 2022 13:44:20 +0200 Subject: [PATCH 12/75] Pallet staking events to named enum (#12342) * Pallet staking events to named enum * fmt * update np staking tests * update remaining events * update benchmarks * Update frame/nomination-pools/test-staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/lib.rs * Update frame/staking/src/pallet/impls.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- .../nomination-pools/test-staking/src/lib.rs | 109 ++++++++++++------ frame/offences/benchmarking/src/lib.rs | 4 +- frame/staking/src/lib.rs | 4 +- frame/staking/src/pallet/impls.rs | 26 +++-- frame/staking/src/pallet/mod.rs | 60 +++++----- frame/staking/src/slashing.rs | 5 +- frame/staking/src/tests.rs | 55 +++++---- 7 files changed, 165 insertions(+), 98 deletions(-) diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 7d848e98174b4..00e0e40ce33b0 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -41,7 +41,10 @@ fn pool_lifecycle_e2e() { // have the pool nominate. 
assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 50),]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -56,7 +59,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 10), StakingEvent::Bonded(POOL1_BONDED, 10),] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -87,8 +93,8 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); assert_eq!( @@ -131,7 +137,7 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 20),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },] ); assert_eq!( pool_events_since_last_call(), @@ -155,7 +161,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Chilled(POOL1_BONDED), StakingEvent::Unbonded(POOL1_BONDED, 50),] + vec![ + StakingEvent::Chilled { stash: POOL1_BONDED }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -169,7 +178,7 @@ fn pool_lifecycle_e2e() { // pools is fully destroyed now. assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 50),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },] ); assert_eq!( pool_events_since_last_call(), @@ -193,7 +202,10 @@ fn pool_slash_e2e() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -210,7 +222,10 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 20), StakingEvent::Bonded(POOL1_BONDED, 20)] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 } + ] ); assert_eq!( pool_events_since_last_call(), @@ -230,8 +245,8 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10) + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 } ] ); assert_eq!( @@ -253,9 +268,9 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); @@ -278,7 +293,10 @@ fn pool_slash_e2e() { 2, // slash era 2, affects chunks at era 5 onwards. 
); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -302,7 +320,10 @@ fn pool_slash_e2e() { unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5) } ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }] @@ -327,7 +348,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), // a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked - vec![StakingEvent::Withdrawn(POOL1_BONDED, 15 + 10 + 15)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }] ); // now, finally, we can unbond the depositor further than their current limit. @@ -336,7 +357,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, 10)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -361,7 +382,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 10)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -388,7 +409,10 @@ fn pool_slash_proportional() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -406,9 +430,9 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, ] ); assert_eq!( @@ -428,7 +452,7 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -445,7 +469,7 @@ fn pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -462,7 +486,7 @@ fn pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -486,7 +510,10 @@ fn pool_slash_proportional() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + 
staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -517,7 +544,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -531,7 +561,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -543,7 +573,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -567,7 +597,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }] @@ -590,7 +623,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -604,7 +640,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -616,7 +652,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -640,7 +676,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index c9498214eade4..555ec42882ee1 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -309,13 +309,13 @@ benchmarks! 
{
		let reward_amount = slash_amount.saturating_mul(1 + n) / 2;
		let reward = reward_amount / r;
		let slash = |id| core::iter::once(
-			<T as StakingConfig>::RuntimeEvent::from(StakingEvent::<T>::Slashed(id, BalanceOf::<T>::from(slash_amount)))
+			<T as StakingConfig>::RuntimeEvent::from(StakingEvent::<T>::Slashed{staker: id, amount: BalanceOf::<T>::from(slash_amount)})
		);
		let balance_slash = |id| core::iter::once(
			<T as StakingConfig>::RuntimeEvent::from(pallet_balances::Event::<T>::Slashed{who: id, amount: slash_amount.into()})
		);
		let chill = |id| core::iter::once(
-			<T as StakingConfig>::RuntimeEvent::from(StakingEvent::<T>::Chilled(id))
+			<T as StakingConfig>::RuntimeEvent::from(StakingEvent::<T>::Chilled{stash: id})
		);
		let balance_deposit = |id, amount: u32|
			<T as StakingConfig>::RuntimeEvent::from(pallet_balances::Event::<T>::Deposit{who: id, amount: amount.into()});
diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs
index df568d6b596ba..eb30671d35a57 100644
--- a/frame/staking/src/lib.rs
+++ b/frame/staking/src/lib.rs
@@ -953,7 +953,9 @@ where
 		if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() {
 			R::report_offence(reporters, offence)
 		} else {
-			<Pallet<T>>::deposit_event(Event::<T>::OldSlashingReportDiscarded(offence_session));
+			<Pallet<T>>::deposit_event(Event::<T>::OldSlashingReportDiscarded {
+				session_index: offence_session,
+			});
 			Ok(())
 		}
 	}
diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs
index 399f50aaed865..6da27da362b53 100644
--- a/frame/staking/src/pallet/impls.rs
+++ b/frame/staking/src/pallet/impls.rs
@@ -181,14 +181,20 @@ impl<T: Config> Pallet<T> {
 		let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total);
 		let validator_staking_payout = validator_exposure_part * validator_leftover_payout;
 
-		Self::deposit_event(Event::<T>::PayoutStarted(era, ledger.stash.clone()));
+		Self::deposit_event(Event::<T>::PayoutStarted {
+			era_index: era,
+			validator_stash: ledger.stash.clone(),
+		});
 
 		let mut total_imbalance = PositiveImbalanceOf::<T>::zero();
 		// We can now make total validator payout:
 		if let Some(imbalance) =
 			Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout)
 		{
-			Self::deposit_event(Event::<T>::Rewarded(ledger.stash, imbalance.peek()));
+			Self::deposit_event(Event::<T>::Rewarded {
+				stash: ledger.stash,
+				amount: imbalance.peek(),
+			});
 			total_imbalance.subsume(imbalance);
 		}
 
@@ -208,7 +214,8 @@ impl<T: Config> Pallet<T> {
 			if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) {
 				// Note: this logic does not count payouts for `RewardDestination::None`.
 				nominator_payout_count += 1;
-				let e = Event::<T>::Rewarded(nominator.who.clone(), imbalance.peek());
+				let e =
+					Event::<T>::Rewarded { stash: nominator.who.clone(), amount: imbalance.peek() };
 				Self::deposit_event(e);
 				total_imbalance.subsume(imbalance);
 			}
@@ -232,7 +239,7 @@ impl<T: Config> Pallet<T> {
 		let chilled_as_validator = Self::do_remove_validator(stash);
 		let chilled_as_nominator = Self::do_remove_nominator(stash);
 		if chilled_as_validator || chilled_as_nominator {
-			Self::deposit_event(Event::<T>::Chilled(stash.clone()));
+			Self::deposit_event(Event::<T>::Chilled { stash: stash.clone() });
 		}
 	}
 
@@ -391,13 +398,18 @@ impl<T: Config> Pallet<T> {
 			let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::<u64>();
 			let staked = Self::eras_total_stake(&active_era.index);
 			let issuance = T::Currency::total_issuance();
-			let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration);
+			let (validator_payout, remainder) =
+				T::EraPayout::era_payout(staked, issuance, era_duration);
 
-			Self::deposit_event(Event::<T>::EraPaid(active_era.index, validator_payout, rest));
+			Self::deposit_event(Event::<T>::EraPaid {
+				era_index: active_era.index,
+				validator_payout,
+				remainder,
+			});
 
 			// Set ending era reward.
 			<ErasValidatorReward<T>>::insert(&active_era.index, validator_payout);
-			T::RewardRemainder::on_unbalanced(T::Currency::issue(rest));
+			T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder));
 
 			// Clear offending validators.
 			<OffendingValidators<T>>::kill();
diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs
index 4db3870c62d8b..6e97697736223 100644
--- a/frame/staking/src/pallet/mod.rs
+++ b/frame/staking/src/pallet/mod.rs
@@ -653,39 +653,36 @@ pub mod pallet {
 	pub enum Event<T: Config> {
 		/// The era payout has been set; the first balance is the validator-payout; the second is
 		/// the remainder from the maximum amount of reward.
-		/// \[era_index, validator_payout, remainder\]
-		EraPaid(EraIndex, BalanceOf<T>, BalanceOf<T>),
-		/// The nominator has been rewarded by this amount. \[stash, amount\]
-		Rewarded(T::AccountId, BalanceOf<T>),
+		EraPaid { era_index: EraIndex, validator_payout: BalanceOf<T>, remainder: BalanceOf<T> },
+		/// The nominator has been rewarded by this amount.
+		Rewarded { stash: T::AccountId, amount: BalanceOf<T> },
 		/// One staker (and potentially its nominators) has been slashed by the given amount.
-		/// \[staker, amount\]
-		Slashed(T::AccountId, BalanceOf<T>),
+		Slashed { staker: T::AccountId, amount: BalanceOf<T> },
 		/// An old slashing report from a prior era was discarded because it could
-		/// not be processed. \[session_index\]
-		OldSlashingReportDiscarded(SessionIndex),
+		/// not be processed.
+		OldSlashingReportDiscarded { session_index: SessionIndex },
 		/// A new set of stakers was elected.
 		StakersElected,
 		/// An account has bonded this amount. \[stash, amount\]
 		///
 		/// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably,
 		/// it will not be emitted for staking rewards when they are added to stake.
-		Bonded(T::AccountId, BalanceOf<T>),
-		/// An account has unbonded this amount. \[stash, amount\]
-		Unbonded(T::AccountId, BalanceOf<T>),
+		Bonded { stash: T::AccountId, amount: BalanceOf<T> },
+		/// An account has unbonded this amount.
+		Unbonded { stash: T::AccountId, amount: BalanceOf<T> },
 		/// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance`
-		/// from the unlocking queue. \[stash, amount\]
-		Withdrawn(T::AccountId, BalanceOf<T>),
-		/// A nominator has been kicked from a validator. \[nominator, stash\]
-		Kicked(T::AccountId, T::AccountId),
+		/// from the unlocking queue.
+		Withdrawn { stash: T::AccountId, amount: BalanceOf<T> },
+		/// A nominator has been kicked from a validator.
+		Kicked { nominator: T::AccountId, stash: T::AccountId },
 		/// The election failed. No new era is planned.
 		StakingElectionFailed,
 		/// An account has stopped participating as either a validator or nominator.
-		/// \[stash\]
-		Chilled(T::AccountId),
-		/// The stakers' rewards are getting paid. \[era_index, validator_stash\]
-		PayoutStarted(EraIndex, T::AccountId),
+		Chilled { stash: T::AccountId },
+		/// The stakers' rewards are getting paid.
+		PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId },
 		/// A validator has set their preferences.
-		ValidatorPrefsSet(T::AccountId, ValidatorPrefs),
+		ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs },
 	}
 
 	#[pallet::error]
@@ -850,7 +847,7 @@ pub mod pallet {
 			let stash_balance = T::Currency::free_balance(&stash);
 			let value = value.min(stash_balance);
-			Self::deposit_event(Event::<T>::Bonded(stash.clone(), value));
+			Self::deposit_event(Event::<T>::Bonded { stash: stash.clone(), amount: value });
 			let item = StakingLedger {
 				stash,
 				total: value,
@@ -911,7 +908,7 @@ pub mod pallet {
 					T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive();
 				}
-				Self::deposit_event(Event::<T>::Bonded(stash, extra));
+				Self::deposit_event(Event::<T>::Bonded { stash, amount: extra });
 			}
 			Ok(())
 		}
@@ -994,7 +991,7 @@ pub mod pallet {
 					.defensive();
 			}
-			Self::deposit_event(Event::<T>::Unbonded(ledger.stash, value));
+			Self::deposit_event(Event::<T>::Unbonded { stash: ledger.stash, amount: value });
 			}
 			Ok(())
 		}
@@ -1050,7 +1047,7 @@ pub mod pallet {
 			if ledger.total < old_total {
 				// Already checked that this won't overflow by entry condition.
 				let value = old_total - ledger.total;
-				Self::deposit_event(Event::<T>::Withdrawn(stash, value));
+				Self::deposit_event(Event::<T>::Withdrawn { stash, amount: value });
 			}
 
 			Ok(post_info_weight.into())
@@ -1088,7 +1085,7 @@ pub mod pallet {
 			Self::do_remove_nominator(stash);
 			Self::do_add_validator(stash, prefs.clone());
-			Self::deposit_event(Event::<T>::ValidatorPrefsSet(ledger.stash, prefs));
+			Self::deposit_event(Event::<T>::ValidatorPrefsSet { stash: ledger.stash, prefs });
 
 			Ok(())
 		}
@@ -1471,7 +1468,10 @@ pub mod pallet {
 			// Last check: the new active amount of ledger must be more than ED.
 			ensure!(ledger.active >= T::Currency::minimum_balance(), Error::<T>::InsufficientBond);
 
-			Self::deposit_event(Event::<T>::Bonded(ledger.stash.clone(), rebonded_value));
+			Self::deposit_event(Event::<T>::Bonded {
+				stash: ledger.stash.clone(),
+				amount: rebonded_value,
+			});
 
 			// NOTE: ledger must be updated prior to calling `Self::weight_of`.
Self::update_ledger(&controller, &ledger); @@ -1546,10 +1546,10 @@ pub mod pallet { if let Some(ref mut nom) = maybe_nom { if let Some(pos) = nom.targets.iter().position(|v| v == stash) { nom.targets.swap_remove(pos); - Self::deposit_event(Event::::Kicked( - nom_stash.clone(), - stash.clone(), - )); + Self::deposit_event(Event::::Kicked { + nominator: nom_stash.clone(), + stash: stash.clone(), + }); } } }); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index f3272a25fab5c..a1900136d64fd 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -626,7 +626,10 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event(super::Event::::Slashed(stash.clone(), value)); + >::deposit_event(super::Event::::Slashed { + staker: stash.clone(), + amount: value, + }); } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6798a78030f9e..8ec98da99ecb1 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -303,7 +303,11 @@ fn rewards_should_work() { assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - total_payout_0,); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) + Event::EraPaid { + era_index: 0, + validator_payout: total_payout_0, + remainder: maximum_payout - total_payout_0 + } ); mock::make_all_reward_payment(0); @@ -341,7 +345,11 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) + Event::EraPaid { + era_index: 1, + validator_payout: total_payout_1, + remainder: maximum_payout - total_payout_1 + } ); mock::make_all_reward_payment(1); @@ -1645,7 +1653,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 100 }); // Re-bond way more than available Staking::rebond(RuntimeOrigin::signed(10), 100_000).unwrap(); @@ -1660,7 +1668,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct, only 800 - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 800 }); }); } @@ -2870,9 +2878,9 @@ fn deferred_slashes_are_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); }) @@ -2901,9 +2909,9 @@ fn retroactive_deferred_slashes_two_eras_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 7100, 21300), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 3, validator_payout: 7100, remainder: 21300 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, ] ); }) @@ -2934,7 +2942,10 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(4); assert_eq!( staking_events_since_last_call(), - vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)] + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 } + ] ); 
assert_eq!(Staking::ledger(10).unwrap().total, 1000); @@ -2944,9 +2955,9 @@ fn retroactive_deferred_slashes_one_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(4, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 4, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); @@ -3090,9 +3101,9 @@ fn remove_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 50), - Event::Slashed(101, 7) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 50 }, + Event::Slashed { staker: 101, amount: 7 } ] ); @@ -4057,7 +4068,7 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed + DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -4955,10 +4966,10 @@ fn min_commission_works() { // event emitted should be correct assert_eq!( *staking_events().last().unwrap(), - Event::ValidatorPrefsSet( - 11, - ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } - ) + Event::ValidatorPrefsSet { + stash: 11, + prefs: ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + } ); assert_ok!(Staking::set_staking_configs( From 74daaf1eb23686991a40c6cc361940421322472b Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:44:16 +0200 Subject: [PATCH 13/75] [fix] Bound staking ledger correctly with MaxUnlockingChunks from configuration (#12343) * used maxunlockingchunks from config * mhl MaxUnlockingChunks * no migration needed * changes as per requested * fmt * fix tests * fix benchmark * warning in the doc for abrupt changes in the config * less unnecessary details in the test * fix tests Co-authored-by: mrisholukamba Co-authored-by: parity-processbot <> --- frame/staking/src/benchmarking.rs | 4 +- frame/staking/src/lib.rs | 7 +--- frame/staking/src/mock.rs | 3 +- frame/staking/src/pallet/mod.rs | 27 +++++++++----- frame/staking/src/tests.rs | 61 +++++++++++++++++++++++++++++-- 5 files changed, 80 insertions(+), 22 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 1ea05bba3b579..c7e6936ac75d8 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -613,7 +613,7 @@ benchmarks! { } rebond { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. T::MaxUnlockingChunks::get() as u32; // clean up any existing state. clear_validators_and_nominators::(); @@ -764,7 +764,7 @@ benchmarks! { #[extra] do_slash { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. 
T::MaxUnlockingChunks::get() as u32;
 		let (stash, controller) = create_stash_controller::<T>(0, 100, Default::default())?;
 		let mut staking_ledger = Ledger::<T>::get(controller.clone()).unwrap();
 		let unlock_chunk = UnlockChunk::<BalanceOf<T>> {
diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs
index eb30671d35a57..a0144463540be 100644
--- a/frame/staking/src/lib.rs
+++ b/frame/staking/src/lib.rs
@@ -301,7 +301,6 @@ mod pallet;
 use codec::{Decode, Encode, HasCompact, MaxEncodedLen};
 use frame_support::{
-	parameter_types,
 	traits::{Currency, Defensive, Get},
 	weights::Weight,
 	BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound,
@@ -349,10 +348,6 @@ type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
 type AccountIdLookupOf<T> = <<T as frame_system::Config>::Lookup as StaticLookup>::Source;
 
-parameter_types! {
-	pub MaxUnlockingChunks: u32 = 32;
-}
-
 /// Information regarding the active era (era in used in session).
 #[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)]
 pub struct ActiveEraInfo {
@@ -465,7 +460,7 @@ pub struct StakingLedger<T: Config> {
 	/// Any balance that is becoming free, which may eventually be transferred out of the stash
 	/// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first
 	/// in, first out queue where the new (higher value) eras get pushed on the back.
-	pub unlocking: BoundedVec<UnlockChunk<BalanceOf<T>>, MaxUnlockingChunks>,
+	pub unlocking: BoundedVec<UnlockChunk<BalanceOf<T>>, T::MaxUnlockingChunks>,
 	/// List of eras for which the stakers behind a validator have claimed rewards. Only updated
 	/// for validators.
 	pub claimed_rewards: BoundedVec<EraIndex, T::HistoryDepth>,
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs
index 385087f9bec41..3a9351ef4a271 100644
--- a/frame/staking/src/mock.rs
+++ b/frame/staking/src/mock.rs
@@ -237,6 +237,7 @@ parameter_types! {
 	pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS;
 	pub static MaxNominations: u32 = 16;
 	pub static HistoryDepth: u32 = 80;
+	pub static MaxUnlockingChunks: u32 = 32;
 	pub static RewardOnUnbalanceWasCalled: bool = false;
 	pub static LedgerSlashPerEra: (BalanceOf<Test>, BTreeMap<EraIndex, BalanceOf<Test>>) = (Zero::zero(), BTreeMap::new());
 }
@@ -301,7 +302,7 @@ impl crate::pallet::pallet::Config for Test {
 	// NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well.
 	type VoterList = VoterBagsList;
 	type TargetList = UseValidatorsMap<Self>;
-	type MaxUnlockingChunks = ConstU32<32>;
+	type MaxUnlockingChunks = MaxUnlockingChunks;
 	type HistoryDepth = HistoryDepth;
 	type OnStakerSlash = OnStakerSlashMock;
 	type BenchmarkingConfig = TestBenchmarkingConfig;
diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs
index 6e97697736223..560c3b6ed830c 100644
--- a/frame/staking/src/pallet/mod.rs
+++ b/frame/staking/src/pallet/mod.rs
@@ -43,9 +43,9 @@ pub use impls::*;
 use crate::{
 	slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout,
-	EraRewardPoints, Exposure, Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations,
-	PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger,
-	UnappliedSlash, UnlockChunk, ValidatorPrefs,
+	EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf,
+	Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk,
+	ValidatorPrefs,
 };
 
 const STAKING_ID: LockIdentifier = *b"staking ";
@@ -142,8 +142,9 @@ pub mod pallet {
 		///
 		/// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec`
 		/// item `StakingLedger.claimed_rewards`. Setting this value lower than
-		/// the existing value can lead to inconsistencies and will need to be
-		/// handled properly in a migration.
+		/// the existing value can lead to inconsistencies in the
+		/// `StakingLedger` and will need to be handled properly in a migration.
+		/// The test `reducing_history_depth_abrupt` shows this effect.
 		#[pallet::constant]
 		type HistoryDepth: Get<u32>;
 
@@ -237,8 +238,16 @@ pub mod pallet {
 		/// VALIDATOR.
 		type TargetList: SortedListProvider<Self::AccountId, Score = BalanceOf<Self>>;
 
-		/// The maximum number of `unlocking` chunks a [`StakingLedger`] can have. Effectively
-		/// determines how many unique eras a staker may be unbonding in.
+		/// The maximum number of `unlocking` chunks a [`StakingLedger`] can
+		/// have. Effectively determines how many unique eras a staker may be
+		/// unbonding in.
+		///
+		/// Note: `MaxUnlockingChunks` is used as the upper bound for the
+		/// `BoundedVec` item `StakingLedger.unlocking`. Setting this value
+		/// lower than the existing value can lead to inconsistencies in the
+		/// `StakingLedger` and will need to be handled properly in a runtime
+		/// migration. The test `reducing_max_unlocking_chunks_abrupt` shows
+		/// this effect.
 		#[pallet::constant]
 		type MaxUnlockingChunks: Get<u32>;
 
@@ -940,7 +949,7 @@ pub mod pallet {
 			let controller = ensure_signed(origin)?;
 			let mut ledger = Self::ledger(&controller).ok_or(Error::<T>::NotController)?;
 			ensure!(
-				ledger.unlocking.len() < MaxUnlockingChunks::get() as usize,
+				ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize,
 				Error::<T>::NoMoreChunks,
 			);
 
@@ -1454,7 +1463,7 @@ pub mod pallet {
 		/// - Bounded by `MaxUnlockingChunks`.
 		/// - Storage changes: Can't increase storage, only decrease it.
 		/// #
-		#[pallet::weight(T::WeightInfo::rebond(MaxUnlockingChunks::get() as u32))]
+		#[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))]
 		pub fn rebond(
 			origin: OriginFor<T>,
 			#[pallet::compact] value: BalanceOf<T>,
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index 8ec98da99ecb1..4812c105c0d80 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -17,7 +17,7 @@
 //! Tests for the module.
 
-use super::{ConfigOp, Event, MaxUnlockingChunks, *};
+use super::{ConfigOp, Event, *};
 use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support};
 use frame_support::{
 	assert_noop, assert_ok, assert_storage_noop, bounded_vec,
@@ -1354,7 +1354,8 @@ fn too_many_unbond_calls_should_not_work() {
 	ExtBuilder::default().build_and_execute(|| {
 		let mut current_era = 0;
 		// locked at era MaxUnlockingChunks - 1 until 3
-		for i in 0..MaxUnlockingChunks::get() - 1 {
+
+		for i in 0..<<Test as Config>::MaxUnlockingChunks as Get<u32>>::get() - 1 {
 			// There is only 1 chunk per era, so we need to be in a new era to create a chunk.
 			current_era = i as u32;
 			mock::start_active_era(current_era);
@@ -1369,7 +1370,7 @@ fn too_many_unbond_calls_should_not_work() {
 		assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1));
 		assert_eq!(
 			Staking::ledger(&10).unwrap().unlocking.len(),
-			MaxUnlockingChunks::get() as usize
+			<<Test as Config>::MaxUnlockingChunks as Get<u32>>::get() as usize
 		);
 		// can't do more.
assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); @@ -5494,7 +5495,7 @@ fn pre_bonding_era_cannot_be_claimed() { } #[test] -fn reducing_history_depth_without_migration() { +fn reducing_history_depth_abrupt() { // Verifies initial conditions of mock ExtBuilder::default().nominate(false).build_and_execute(|| { let original_history_depth = HistoryDepth::get(); @@ -5571,3 +5572,55 @@ fn reducing_history_depth_without_migration() { HistoryDepth::set(original_history_depth); }); } + +#[test] +fn reducing_max_unlocking_chunks_abrupt() { + // Concern is on validators only + // By Default 11, 10 are stash and ctrl and 21,20 + ExtBuilder::default().build_and_execute(|| { + // given a staker at era=10 and MaxUnlockChunks set to 2 + MaxUnlockingChunks::set(2); + start_active_era(10); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 300, RewardDestination::Staked)); + assert!(matches!(Staking::ledger(4), Some(_))); + + // when staker unbonds + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 20)); + + // then an unlocking chunk is added at `current_era + bonding_duration` + // => 10 + 3 = 13 + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. + }) if unlocking==expected_unlocking)); + + // when staker unbonds at next era + start_active_era(11); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 50)); + // then another unlock chunk is added + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. 
+ }) if unlocking==expected_unlocking)); + + // when staker unbonds further + start_active_era(12); + // then further unbonding not possible + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NoMoreChunks); + + // when max unlocking chunks is reduced abruptly to a low value + MaxUnlockingChunks::set(1); + // then unbond, rebond ops are blocked with ledger in corrupt state + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NotController); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(4), 100), Error::::NotController); + + // reset the ledger corruption + MaxUnlockingChunks::set(2); + }) +} From 94b9646177430adb74d7e4737c98ba333f91c451 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 27 Sep 2022 19:31:12 +0200 Subject: [PATCH 14/75] [Feature] Add deposit to fast-unstake (#12366) * [Feature] Add deposit to fast-unstake * disable on ErasToCheckPerBlock == 0 * removed signed ext * remove obsolete import * remove some obsolete stuff * fix some comments * fixed all the comments * remove obsolete imports * fix some tests * CallNotAllowed tests * Update frame/fast-unstake/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix tests * fix deregister + tests * more fixes * make sure we go above existential deposit * fixed the last test * some nit fixes * fix node * fix bench * last bench fix * Update frame/fast-unstake/src/lib.rs * ".git/.scripts/fmt.sh" 1 Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 3 +- frame/fast-unstake/src/benchmarking.rs | 8 +- frame/fast-unstake/src/lib.rs | 105 +++++--- frame/fast-unstake/src/mock.rs | 19 +- frame/fast-unstake/src/tests.rs | 333 ++++++++++++++++--------- frame/fast-unstake/src/types.rs | 83 +----- 6 files changed, 313 insertions(+), 238 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8ed5f1c847f5e..aa1a525bf095c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -581,8 +581,9 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = ConstU128<{ DOLLARS }>; type ControlOrigin = frame_system::EnsureRoot; + type Deposit = ConstU128<{ DOLLARS }>; + type DepositCurrency = Balances; type WeightInfo = (); } diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 5690d5ce6f29f..8770cc6b64c0d 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -110,18 +110,18 @@ fn on_idle_full_block() { benchmarks! { // on_idle, we we don't check anyone, but fully unbond and move them to another pool. on_idle_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); - ErasToCheckPerBlock::::put(1); // run on_idle once. This will check era 0. assert_eq!(Head::::get(), None); on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), deposit: T::Deposit::get() }) ); } : { @@ -162,7 +162,7 @@ benchmarks! 
{ let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked }) + Some(UnstakeRequest { stash: who.clone(), checked, deposit: T::Deposit::get() }) ); assert!(matches!( fast_unstake_events::().last(), @@ -171,6 +171,7 @@ benchmarks! { } register_fast_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); @@ -182,6 +183,7 @@ benchmarks! { } deregister { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 7fbac8560ea6c..ed26d6b436e1d 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -81,7 +81,10 @@ pub mod pallet { use super::*; use crate::types::*; use frame_election_provider_support::ElectionProvider; - use frame_support::pallet_prelude::*; + use frame_support::{ + pallet_prelude::*, + traits::{Defensive, ReservableCurrency}, + }; use frame_system::{pallet_prelude::*, RawOrigin}; use pallet_staking::Pallet as Staking; use sp_runtime::{ @@ -90,7 +93,6 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] @@ -113,10 +115,12 @@ pub mod pallet { + IsType<::RuntimeEvent> + TryInto>; - /// The amount of balance slashed per each era that was wastefully checked. - /// - /// A reasonable value could be `runtime_weight_to_fee(weight_per_era_check)`. - type SlashPerEra: Get>; + /// The currency used for deposits. + type DepositCurrency: ReservableCurrency>; + + /// Deposit to take for unstaking, to make sure we're able to slash the it in order to cover + /// the costs of resources on unsuccessful unstake. + type Deposit: Get>; /// The origin that can control this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; @@ -128,13 +132,13 @@ pub mod pallet { /// The current "head of the queue" being unstaked. #[pallet::storage] pub type Head = - StorageValue<_, UnstakeRequest>, OptionQuery>; + StorageValue<_, UnstakeRequest, BalanceOf>, OptionQuery>; /// The map of all accounts wishing to be unstaked. /// - /// Keeps track of `AccountId` wishing to unstake. + /// Keeps track of `AccountId` wishing to unstake and it's corresponding deposit. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, BalanceOf>; /// Number of eras to check per block. /// @@ -177,6 +181,8 @@ pub mod pallet { NotQueued, /// The provided un-staker is already in Head, and cannot deregister. AlreadyHead, + /// The call is not allowed at this point because the pallet is not active. 
+ CallNotAllowed, } #[pallet::hooks] @@ -214,6 +220,8 @@ pub mod pallet { pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let ledger = pallet_staking::Ledger::::get(&ctrl).ok_or(Error::::NotController)?; ensure!(!Queue::::contains_key(&ledger.stash), Error::::AlreadyQueued); @@ -231,8 +239,10 @@ pub mod pallet { Staking::::chill(RawOrigin::Signed(ctrl.clone()).into())?; Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; + T::DepositCurrency::reserve(&ledger.stash, T::Deposit::get())?; + // enqueue them. - Queue::::insert(ledger.stash, ()); + Queue::::insert(ledger.stash, T::Deposit::get()); Ok(()) } @@ -246,6 +256,9 @@ pub mod pallet { #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let stash = pallet_staking::Ledger::::get(&ctrl) .map(|l| l.stash) .ok_or(Error::::NotController)?; @@ -254,7 +267,17 @@ pub mod pallet { Head::::get().map_or(true, |UnstakeRequest { stash, .. }| stash != stash), Error::::AlreadyHead ); - Queue::::remove(stash); + let deposit = Queue::::take(stash.clone()); + + if let Some(deposit) = deposit.defensive() { + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } + } + Ok(()) } @@ -315,18 +338,23 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) - .next() - }) { - None => { - // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) - }, - Some(head) => head, - }; + let UnstakeRequest { stash, mut checked, deposit } = + match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, deposit)| UnstakeRequest { + stash, + deposit, + checked: Default::default(), + }) + .next() + }) { + None => { + // There's no `Head` and nothing in the `Queue`, nothing to do here. + return T::DbWeight::get().reads(4) + }, + Some(head) => head, + }; log!( debug, @@ -381,9 +409,16 @@ pub mod pallet { num_slashing_spans, ); - log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } else { + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + Self::deposit_event(Event::::Unstaked { stash, result }); + } - Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -406,22 +441,18 @@ pub mod pallet { // the last 28 eras, have registered yourself to be unstaked, midway being checked, // you are exposed. 
if is_exposed { - let amount = T::SlashPerEra::get() - .saturating_mul(eras_checked.saturating_add(checked.len() as u32).into()); - pallet_staking::slashing::do_slash::( - &stash, - amount, - &mut Default::default(), - &mut Default::default(), - current_era, - ); - log!(info, "slashed {:?} by {:?}", stash, amount); - Self::deposit_event(Event::::Slashed { stash, amount }); + T::DepositCurrency::slash_reserved(&stash, deposit); + log!(info, "slashed {:?} by {:?}", stash, deposit); + Self::deposit_event(Event::::Slashed { stash, amount: deposit }); } else { // Not exposed in these eras. match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { stash: stash.clone(), checked }); + Head::::put(UnstakeRequest { + stash: stash.clone(), + checked, + deposit, + }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 62f343709e245..4c4c5f9ff26fd 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -164,12 +164,13 @@ impl Convert for U256ToBalance { } parameter_types! { - pub static SlashPerEra: u32 = 100; + pub static DepositAmount: u128 = 7; } impl fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = SlashPerEra; + type Deposit = DepositAmount; + type DepositCurrency = Balances; type ControlOrigin = frame_system::EnsureRoot; type WeightInfo = (); } @@ -213,11 +214,11 @@ impl Default for ExtBuilder { fn default() -> Self { Self { exposed_nominators: vec![ - (1, 2, 100), - (3, 4, 100), - (5, 6, 100), - (7, 8, 100), - (9, 10, 100), + (1, 2, 7 + 100), + (3, 4, 7 + 100), + (5, 6, 7 + 100), + (7, 8, 7 + 100), + (9, 10, 7 + 100), ], } } @@ -270,8 +271,8 @@ impl ExtBuilder { .into_iter() .map(|(_, ctrl, balance)| (ctrl, balance * 2)), ) - .chain(validators_range.clone().map(|x| (x, 100))) - .chain(nominators_range.clone().map(|x| (x, 100))) + .chain(validators_range.clone().map(|x| (x, 7 + 100))) + .chain(nominators_range.clone().map(|x| (x, 7 + 100))) .collect::>(), } .assimilate_storage(&mut storage); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 5586443ce797c..6e617fd992028 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -35,6 +35,7 @@ fn test_setup_works() { #[test] fn register_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. @@ -42,9 +43,38 @@ fn register_works() { }); } +#[test] +fn register_insufficient_funds_fails() { + use pallet_balances::Error as BalancesError; + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + ::DepositCurrency::make_free_balance_be(&1, 3); + + // Controller account registers for fast unstake. + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + BalancesError::::InsufficientBalance, + ); + + // Ensure stash is in the queue. 
+ assert_eq!(Queue::::get(1), None); + }); +} + +#[test] +fn register_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + Error::::CallNotAllowed + ); + }); +} + #[test] fn cannot_register_if_not_bonded() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Mint accounts 1 and 2 with 200 tokens. for _ in 1..2 { let _ = Balances::make_free_balance_be(&1, 200); @@ -60,8 +90,9 @@ fn cannot_register_if_not_bonded() { #[test] fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Queue item - Queue::::insert(1, ()); + Queue::::insert(1, 10); // Cannot re-register, already in queue assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -73,8 +104,13 @@ fn cannot_register_if_in_queue() { #[test] fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to regsiter assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -86,6 +122,7 @@ fn cannot_register_if_head() { #[test] fn cannot_register_if_has_unlocking_chunks() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Start unbonding half of staked tokens assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active @@ -99,18 +136,37 @@ fn cannot_register_if_has_unlocking_chunks() { #[test] fn deregister_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Ensure stash no longer exists in the queue. assert_eq!(Queue::::get(1), None); }); } +#[test] +fn deregister_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + ErasToCheckPerBlock::::put(0); + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::CallNotAllowed); + }); +} + #[test] fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. 
@@ -121,6 +177,7 @@ fn cannot_deregister_if_not_controller() { #[test] fn cannot_deregister_if_not_queued() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller tries to deregister without first registering assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::NotQueued); }); @@ -129,10 +186,15 @@ fn cannot_deregister_if_not_queued() { #[test] fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller attempts to register, should fail assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -165,14 +227,14 @@ mod on_idle { // set up Queue item assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); }); } @@ -185,7 +247,7 @@ mod on_idle { // given assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -204,7 +266,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); // when: another 1 era. @@ -220,7 +286,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when: then 5 eras, we only need 2 more. 
@@ -242,7 +312,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: not enough weight to unstake: @@ -254,7 +328,11 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -285,12 +363,16 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -300,7 +382,11 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); assert_eq!(Queue::::count(), 4); @@ -317,10 +403,16 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 5, + checked: bounded_vec![3, 2, 1, 0] + }), ); assert_eq!(Queue::::count(), 3); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!( fast_unstake_events_since_last_call(), vec![ @@ -340,9 +432,9 @@ mod on_idle { // register multi accounts for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_eq!(Queue::::get(3), Some(())); + assert_eq!(Queue::::get(3), Some(DepositAmount::get())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -391,7 +483,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -402,7 +494,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -425,9 +521,11 @@ mod on_idle { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); + Balances::make_free_balance_be(&2, 100); + // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ 
-438,7 +536,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -464,7 +566,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -475,28 +577,44 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -529,30 +647,46 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: a new era happens right before one is free. @@ -567,6 +701,7 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. 
checked: bounded_vec![3, 2, 1, 4], + deposit: DepositAmount::get(), }) ); @@ -602,13 +737,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when @@ -618,13 +761,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // then we register a new era. @@ -636,14 +787,22 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4] + }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4, 1] + }) ); // but notice that we don't care about era 0 instead anymore! we're done. @@ -669,7 +828,6 @@ mod on_idle { fn exposed_nominator_cannot_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -686,6 +844,7 @@ mod on_idle { )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + Balances::make_free_balance_be(&exposed, 100_000); // register the exposed one. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); @@ -693,23 +852,30 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 21, since we checked 3 eras in total (3, 2, 1). vec![ Event::Checking { stash: exposed, eras: vec![3] }, Event::Checking { stash: exposed, eras: vec![2] }, - Event::Slashed { stash: exposed, amount: 3 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -721,7 +887,6 @@ mod on_idle { // same as the previous check, but we check 2 eras per block, and we make the exposed be // exposed in era 0, so that it is detected halfway in a check era. 
ErasToCheckPerBlock::::put(2); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -729,7 +894,7 @@ mod on_idle { pallet_staking::ErasStakers::::mutate(0, VALIDATORS_PER_ERA, |expo| { expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); }); - Balances::make_free_balance_be(&exposed, 100); + Balances::make_free_balance_be(&exposed, DepositAmount::get() + 100); assert_ok!(Staking::bond( RuntimeOrigin::signed(exposed), exposed, @@ -745,17 +910,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 28, since we checked 4 eras in total. + // we slash them vec![ Event::Checking { stash: exposed, eras: vec![3, 2] }, - Event::Slashed { stash: exposed, amount: 4 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -786,7 +955,7 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: 100 }] + vec![Event::Slashed { stash: 100, amount: DepositAmount::get() }] ); }); } @@ -798,7 +967,7 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // create a new validator that 100% not exposed. - Balances::make_free_balance_be(&42, 100); + Balances::make_free_balance_be(&42, 100 + DepositAmount::get()); assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); @@ -809,7 +978,11 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 42, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -824,69 +997,3 @@ mod on_idle { }); } } - -mod signed_extension { - use super::*; - use sp_runtime::traits::SignedExtension; - - const STAKING_CALL: crate::mock::RuntimeCall = - crate::mock::RuntimeCall::Staking(pallet_staking::Call::::chill {}); - - #[test] - fn does_nothing_if_not_queued() { - ExtBuilder::default().build_and_execute(|| { - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_ok()); - }) - } - - #[test] - fn prevents_queued() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. - // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - // then - // stash can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } - - #[test] - fn prevents_head_stash() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. 
- // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) - ); - - // then - // stash can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } -} diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 2ddb8dca27e9e..08b9ab4326eb2 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,14 +17,12 @@ //! Types used in the Fast Unstake pallet. -use crate::*; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - traits::{Currency, Get, IsSubType}, + traits::{Currency, Get}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; -use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; use sp_std::{fmt::Debug, prelude::*}; @@ -36,80 +34,15 @@ pub type BalanceOf = <::Currency as Currency< #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, )] -pub struct UnstakeRequest> { +pub struct UnstakeRequest< + AccountId: Eq + PartialEq + Debug, + MaxChecked: Get, + Balance: PartialEq + Debug, +> { /// Their stash account. pub(crate) stash: AccountId, /// The list of eras for which they have been checked. pub(crate) checked: BoundedVec, -} - -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] -#[scale_info(skip_type_params(T))] -pub struct PreventStakingOpsIfUnbonding(sp_std::marker::PhantomData); - -impl PreventStakingOpsIfUnbonding { - pub fn new() -> Self { - Self(Default::default()) - } -} - -impl sp_runtime::traits::SignedExtension - for PreventStakingOpsIfUnbonding -where - ::RuntimeCall: IsSubType>, -{ - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "PreventStakingOpsIfUnbonding"; - - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - // NOTE: we want to prevent this stash-controller pair from doing anything in the - // staking system as long as they are registered here. - stash_or_controller: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - // we don't check this in the tx-pool as it requires a storage read. - if >>::is_sub_type(call).is_some() { - let check_stash = |stash: &T::AccountId| { - if Queue::::contains_key(&stash) || - Head::::get().map_or(false, |u| &u.stash == stash) - { - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - } else { - Ok(()) - } - }; - match ( - // mapped from controller. - pallet_staking::Ledger::::get(&stash_or_controller), - // mapped from stash. - pallet_staking::Bonded::::get(&stash_or_controller), - ) { - (Some(ledger), None) => { - // it is a controller. - check_stash(&ledger.stash) - }, - (_, Some(_)) => { - // it's a stash. - let stash = stash_or_controller; - check_stash(stash) - }, - (None, None) => { - // They are not a staker -- let them execute. 
- Ok(()) - }, - } - } else { - Ok(()) - } - } + /// Deposit to be slashed if the unstake was unsuccessful. + pub(crate) deposit: Balance, } From 2ee4cb47fa01ad6c7d6c94acf3370fd26470b388 Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Wed, 28 Sep 2022 04:14:01 +0800 Subject: [PATCH 15/75] Add missing CountedStorageMap in pallet::storage error info (#12356) --- frame/support/procedural/src/pallet/parse/storage.rs | 4 ++-- .../test/tests/pallet_ui/storage_not_storage_type.stderr | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 321c4dd5d4914..b16ff05803d98 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -555,8 +555,8 @@ fn process_generics( found => { let msg = format!( "Invalid pallet::storage, expected ident: `StorageValue` or \ - `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, \ - found `{}`.", + `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` \ + in order to expand metadata, found `{}`.", found, ); return Err(syn::Error::new(segment.ident.span(), msg)) diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr index 4fd59183282d0..223e9cfa3e9f8 100644 --- a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. 
--> $DIR/storage_not_storage_type.rs:19:16 | 19 | type Foo = u8; From 17c07af0b953b84dbe89341294e98e586f9b4591 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 28 Sep 2022 18:21:53 +0800 Subject: [PATCH 16/75] Add storage size component to weights (#12277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add storage size component to weights * Rename storage_size to proof_size * Update primitives/weights/src/weight_v2.rs Co-authored-by: Oliver Tale-Yazdi * Fixes * cargo fmt * Implement custom Decode and CompactAs * Add missing import * Fixes * Remove CompactAs implementation * Properly migrate from 1D weight * Remove #[pallet::compact] from Weight parameters * More #[pallet::compact] removals * Add unit tests * Set appropriate default block proof size * cargo fmt * Remove nonsensical weight constant * Test only for the reference time weight in frame_system::limits * Only check for reference time weight on idle * Use destructuring syntax * Update test expectations * Fixes * Fixes * Fixes * Correctly migrate from 1D weights * cargo fmt * Migrate using extra extrinsics instead of custom Decode * Fixes * Silence dispatch call warnings that were previously allowed * Fix gas_left test * Use OldWeight instead of u64 * Fixes * Only check for reference time weight in election provider * Fix test expectations * Fix test expectations * Use only reference time weight in grandpa test * Use only reference time weight in examples test * Use only reference time weight in examples test * Fix test expectations Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Alexander Theißen --- frame/alliance/src/lib.rs | 62 ++++- frame/babe/src/tests.rs | 3 +- frame/collective/src/lib.rs | 69 +++++- frame/contracts/src/lib.rs | 179 ++++++++++++++- frame/contracts/src/wasm/mod.rs | 15 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../src/unsigned.rs | 9 +- frame/examples/basic/src/tests.rs | 6 +- frame/executive/src/lib.rs | 7 +- frame/grandpa/src/tests.rs | 3 +- .../procedural/src/pallet/expand/call.rs | 20 ++ .../procedural/src/pallet/parse/call.rs | 3 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/limits.rs | 21 +- frame/transaction-payment/src/types.rs | 5 +- primitives/weights/src/lib.rs | 21 +- primitives/weights/src/weight_v2.rs | 211 ++++++++++++------ 20 files changed, 531 insertions(+), 128 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 2ef6718538122..24111b44ced9e 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -120,7 +120,7 @@ use frame_support::{ ChangeMembers, Currency, Get, InitializeMembers, IsSubType, OnUnbalanced, ReservableCurrency, }, - weights::Weight, + weights::{OldWeight, Weight}, }; use pallet_identity::IdentityField; @@ -620,25 +620,22 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(x, y, p2)) .max(T::WeightInfo::close_approved(b, x, y, p2)) .max(T::WeightInfo::close_disapproved(x, y, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) })] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to use `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: 
OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let who = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - let info = T::ProposalProvider::close_proposal( - proposal_hash, - index, - proposal_weight_bound, - length_bound, - )?; - Ok(info.into()) + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) } /// Initialize the Alliance, onboard founders, fellows, and allies. @@ -985,6 +982,34 @@ pub mod pallet { Self::deposit_event(Event::UnscrupulousItemRemoved { items }); Ok(()) } + + /// Close a vote that is either approved, disapproved, or whose voting period has ended. + /// + /// Requires the sender to be a founder or fellow. + #[pallet::weight({ + let b = *length_bound; + let x = T::MaxFounders::get(); + let y = T::MaxFellows::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, x, y, p2) + .max(T::WeightInfo::close_early_disapproved(x, y, p2)) + .max(T::WeightInfo::close_approved(b, x, y, p2)) + .max(T::WeightInfo::close_disapproved(x, y, p2)) + .saturating_add(p1) + })] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } @@ -1197,4 +1222,19 @@ impl, I: 'static> Pallet { } res } + + fn do_close( + proposal_hash: T::Hash, + index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + let info = T::ProposalProvider::close_proposal( + proposal_hash, + index, + proposal_weight_bound, + length_bound, + )?; + Ok(info.into()) + } } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 8d2a9b326cd0f..d4132e6378540 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -852,7 +852,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
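Each of the deprecated `*_old_weight` extrinsics in this patch follows the same compatibility pattern: the 1D weight is still decoded as a compact `u64` (`OldWeight`), then widened into the 2D `Weight` before use. A minimal sketch of that conversion, assuming the `OldWeight` wrapper and its `From` impl added in `primitives/weights` later in this same patch (the helper name `upgrade_gas_limit` is illustrative only, not part of the patch):

    // Illustrative sketch: how a deprecated `*_old_weight` extrinsic lifts a
    // 1D `OldWeight` into the 2D `Weight`.
    use frame_support::weights::{OldWeight, Weight};

    fn upgrade_gas_limit(gas_limit: OldWeight) -> Weight {
        // Equivalent to the `let gas_limit: Weight = gas_limit.into();` lines in
        // the diffs: only the `ref_time` component is filled, `proof_size` is 0.
        Weight::from_ref_time(gas_limit.0)
    }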
diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index ae68ae2fe3e16..06d5b1fab78e7 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -57,7 +57,7 @@ use frame_support::{ traits::{ Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, }, - weights::Weight, + weights::{OldWeight, Weight}, }; #[cfg(test)] @@ -620,17 +620,20 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(m, p2)) .max(T::WeightInfo::close_approved(b, m, p2)) .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) }, DispatchClass::Operational ))] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let _ = ensure_signed(origin)?; Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) @@ -659,6 +662,64 @@ pub mod pallet { let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) } + + /// Close a vote that is either approved, disapproved or whose voting period has ended. + /// + /// May be called by any signed account in order to finish voting and close the proposal. + /// + /// If called before the end of the voting period it will only close the vote if it + /// has enough votes to be approved or disapproved. + /// + /// If called after the end of the voting period abstentions are counted as rejections + /// unless there is a prime member set and the prime member cast an approval. + /// + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// + /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed + /// proposal. + /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via + /// `storage::read` so it is `size_of::() == 4` larger than the pure length. + /// + /// # + /// ## Weight + /// - `O(B + M + P1 + P2)` where: + /// - `B` is `proposal` size in bytes (length-fee-bounded) + /// - `M` is members-count (code- and governance-bounded) + /// - `P1` is the complexity of `proposal` preimage.
+ /// - `P2` is proposal-count (code-bounded) + /// - DB: + /// - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`) + /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec + /// `O(P2)`) + /// - any mutations done while executing `proposal` (`P1`) + /// - up to 3 events + /// # + #[pallet::weight(( + { + let b = *length_bound; + let m = T::MaxMembers::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, m, p2) + .max(T::WeightInfo::close_early_disapproved(m, p2)) + .max(T::WeightInfo::close_approved(b, m, p2)) + .max(T::WeightInfo::close_disapproved(m, p2)) + .saturating_add(p1) + }, + DispatchClass::Operational + ))] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index fc44e4507ca00..f9a1c8decf042 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -113,7 +113,7 @@ use frame_support::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time, }, - weights::Weight, + weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -429,15 +429,18 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] - pub fn call( + #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] + pub fn call_old_weight( origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut output = Self::internal_call( @@ -485,17 +488,22 @@ pub mod pallet { /// - The `deploy` function is executed in the context of the newly-created account. #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add(*gas_limit) + .saturating_add((*gas_limit).into()) )] - pub fn instantiate_with_code( + #[allow(deprecated)] + #[deprecated( + note = "1D weight is used in this extrinsic, please migrate to `instantiate_with_code`" + )] + pub fn instantiate_with_code_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code: Vec, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let code_len = code.len() as u32; let salt_len = salt.len() as u32; @@ -526,17 +534,20 @@ pub mod pallet { /// code deployment step. 
Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) )] - pub fn instantiate( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] + pub fn instantiate_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code_hash: CodeHash, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let salt_len = salt.len() as u32; let mut output = Self::internal_instantiate( @@ -639,6 +650,154 @@ pub mod pallet { Ok(()) }) } + + /// Makes a call to an account, optionally transferring some balance. + /// + /// # Parameters + /// + /// * `dest`: Address of the contract to call. + /// * `value`: The balance to transfer from the `origin` to `dest`. + /// * `gas_limit`: The gas limit enforced when executing this call. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the + /// caller to pay for the storage consumed. + /// * `data`: The input data to pass to the contract. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] + pub fn call( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + data: Vec, + ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + let mut output = Self::internal_call( + origin, + dest, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + data, + None, + ); + if let Ok(retval) = &output.result { + if retval.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + } + + /// Instantiates a new contract from the supplied `code` optionally transferring + /// some balance. + /// + /// This dispatchable has the same effect as calling [`Self::upload_code`] + + /// [`Self::instantiate`]. Bundling them together provides efficiency gains. Please + /// also check the documentation of [`Self::upload_code`]. + /// + /// # Parameters + /// + /// * `value`: The balance to transfer from the `origin` to the newly created contract. + /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved + /// from the caller to pay for the storage consumed. + /// * `code`: The contract code to deploy in raw bytes. + /// * `data`: The input data to pass to the contract constructor. + /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`].
+ /// + /// Instantiation is executed as follows: + /// + /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that + /// code. + /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. + /// - The destination address is computed based on the sender, code_hash and the salt. + /// - The smart-contract account is created at the computed address. + /// - The `value` is transferred to the new account. + /// - The `deploy` function is executed in the context of the newly-created account. + #[pallet::weight( + T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) + .saturating_add(*gas_limit) + )] + pub fn instantiate_with_code( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let code_len = code.len() as u32; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Upload(Bytes(code)), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, result)| result), + T::WeightInfo::instantiate_with_code(code_len, salt_len), + ) + } + + /// Instantiates a contract from a previously deployed wasm binary. + /// + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. 
+ #[pallet::weight( + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + )] + pub fn instantiate( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Existing(code_hash), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, output)| output), + T::WeightInfo::instantiate(salt_len), + ) + } } #[pallet::event] diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 126a37e9401ec..d8b4cd245356e 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -274,7 +274,11 @@ mod tests { BalanceOf, CodeHash, Error, Pallet as Contracts, }; use assert_matches::assert_matches; - use frame_support::{assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight}; + use frame_support::{ + assert_ok, + dispatch::DispatchResultWithPostInfo, + weights::{OldWeight, Weight}, + }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; use sp_core::{Bytes, H256}; @@ -1545,10 +1549,11 @@ mod tests { let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let gas_left = Weight::decode(&mut &*output.data).unwrap(); + let OldWeight(gas_left) = OldWeight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); - assert!(gas_left.all_lt(gas_limit), "gas_left must be less than initial"); - assert!(gas_left.all_gt(actual_left), "gas_left must be greater than final"); + // TODO: account for proof size weight + assert!(gas_left < gas_limit.ref_time(), "gas_left must be less than initial"); + assert!(gas_left > actual_left.ref_time(), "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1946,7 +1951,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left().all_gt(Weight::zero())); + assert!(mock_ext.gas_meter.gas_left().ref_time() > 0); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 05353e5a3ac61..bba8139f38f44 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1008,8 +1008,10 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. 
let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; + // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), + Self::solution_weight_of(&raw_solution, size).ref_time() < + T::SignedMaxWeight::get().ref_time(), Error::::SignedTooMuchWeight, ); @@ -2336,8 +2338,9 @@ mod tests { }; let mut active = 1; - while weight_with(active) - .all_lte(::BlockWeights::get().max_block) || + // TODO: account for proof size weight + while weight_with(active).ref_time() <= + ::BlockWeights::get().max_block.ref_time() || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 833f80c90d13e..281ac37421174 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,7 +638,8 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - if current_weight.all_lt(max_weight) { + // TODO: account for proof size weight + if current_weight.ref_time() < max_weight.ref_time() { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -673,7 +674,8 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, then reduce until we are sure we are below limit. - while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { + // TODO: account for proof size weight + while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -681,8 +683,9 @@ impl Miner { } let final_decision = voters.min(size.voters); + // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).all_lte(max_weight), + weight_with(final_decision).ref_time() <= max_weight.ref_time(), "weight_with({}) <= {}", final_decision, max_weight, diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index db4787eaa0faa..97fbddfbc41e0 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -191,11 +191,13 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > 0); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less.
let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); - assert!(info1.weight.all_gt(info2.weight)); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > info2.weight.ref_time()); } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index a41c82da5757c..014c7a2bc02a6 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,7 +459,8 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight.all_gt(Weight::zero()) { + // TODO: account for proof size weight + if remaining_weight.ref_time() > 0 { let used_weight = >::on_idle( block_number, remaining_weight, @@ -938,13 +939,13 @@ mod tests { block_import_works_inner( new_test_ext_v0(1), array_bytes::hex_n_into_unchecked( - "1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5", + "0d786e24c1f9e6ce237806a22c005bbbc7dee4edd6692b6c5442843d164392de", ), ); block_import_works_inner( new_test_ext(1), array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + "348485a4ab856467b440167e45f99b491385e8528e09b0e51f85f814a3021c93", ), ); } diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 775eda58c03e0..5d2ebdf29cb6b 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,7 +856,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 18d5adee63ad6..39d16109aa8fa 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::{pallet::Def, COUNTER}; +use quote::ToTokens; use syn::spanned::Spanned; /// @@ -158,6 +159,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }); } + // Extracts #[allow] attributes, necessary so that we don't run into compiler warnings + let maybe_allow_attrs = methods + .iter() + .map(|method| { + method + .attrs + .iter() + .find(|attr| { + if let Ok(syn::Meta::List(syn::MetaList { path, .. })) = attr.parse_meta() { + path.segments.last().map(|seg| seg.ident == "allow").unwrap_or(false) + } else { + false + } + }) + .map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream()) + }) + .collect::>(); + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -289,6 +308,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); + #maybe_allow_attrs <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) .map(Into::into).map_err(Into::into) }, diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 336e08c3d39b7..f7b2c9544d831 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -61,6 +61,8 @@ pub struct CallVariantDef { pub call_index: u8, /// Docs, used for metadata. 
pub docs: Vec, + /// Attributes annotated at the top of the dispatchable function. + pub attrs: Vec, } /// Attributes for functions in call impl block. @@ -287,6 +289,7 @@ impl CallDef { call_index: final_index, args, docs, + attrs: method.attrs.clone(), }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 5d159ec961c7f..b0716d569409c 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 4671855431b27..926dc92530659 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index d9cd20711403d..563190a06f76f 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9a4e8d740cb2c..c10005223b674 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index e182eb626424d..cfc1d261baa01 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u32 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -224,6 +224,7 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); + // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -232,18 +233,16 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.all_gt(self.base_block) && 
max_for_class.all_gt(base_for_class)) - || max_for_class == Weight::zero(), + (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) + || max_for_class.ref_time() == 0, &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights - .max_extrinsic - .unwrap_or(Weight::zero()) - .all_lte(max_for_class.saturating_sub(base_for_class)), + weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= + max_for_class.saturating_sub(base_for_class).ref_time(), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -252,14 +251,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), + weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.all_gt(base_for_class) || reserved == Weight::zero(), + reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -268,7 +267,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), + self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -277,7 +276,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. 
error_assert!( - self.max_block.all_gt(base_for_class + self.base_block), + self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 1f41ba7b0b72e..fff41ef6937f5 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -140,7 +140,8 @@ mod tests { partial_fee: 1_000_000_u64, }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"1000000"}"#; + let json_str = + r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -157,7 +158,7 @@ mod tests { partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index d260f73d41268..e1ac7fcd4e892 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -30,7 +30,7 @@ extern crate self as sp_weights; mod weight_v2; -use codec::{Decode, Encode}; +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -53,6 +53,25 @@ pub mod constants { pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); } +/// The old weight type. +/// +/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other +/// cases. +#[derive( + Decode, + Encode, + CompactAs, + PartialEq, + Eq, + Clone, + Copy, + RuntimeDebug, + Default, + MaxEncodedLen, + TypeInfo, +)] +pub struct OldWeight(pub u64); + /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index af0f469ebaaeb..a8eaf79a28711 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use sp_debug_derive::RuntimeDebug; @@ -23,22 +23,22 @@ use sp_debug_derive::RuntimeDebug; use super::*; #[derive( - Encode, - Decode, - MaxEncodedLen, - TypeInfo, - Eq, - PartialEq, - Copy, - Clone, - RuntimeDebug, - Default, - CompactAs, + Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, RuntimeDebug, Default, )] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Weight { + #[codec(compact)] /// The weight of computational time used based on some reference hardware. ref_time: u64, + #[codec(compact)] + /// The weight of storage space used by proof of validity. 
+ proof_size: u64, +} + +impl From for Weight { + fn from(old: OldWeight) -> Self { + Weight::from_ref_time(old.0) + } } impl Weight { @@ -48,71 +48,118 @@ impl Weight { self } + /// Set the storage size part of the weight. + pub const fn set_proof_size(mut self, c: u64) -> Self { + self.proof_size = c; + self + } + /// Return the reference time part of the weight. pub const fn ref_time(&self) -> u64 { self.ref_time } - /// Return a mutable reference time part of the weight. + /// Return the storage size part of the weight. + pub const fn proof_size(&self) -> u64 { + self.proof_size + } + + /// Return a mutable reference to the reference time part of the weight. pub fn ref_time_mut(&mut self) -> &mut u64 { &mut self.ref_time } - pub const MAX: Self = Self { ref_time: u64::MAX }; + /// Return a mutable reference to the storage size part of the weight. + pub fn proof_size_mut(&mut self) -> &mut u64 { + &mut self.proof_size + } + + pub const MAX: Self = Self { ref_time: u64::MAX, proof_size: u64::MAX }; /// Get the conservative min of `self` and `other` weight. pub fn min(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.min(other.ref_time) } + Self { + ref_time: self.ref_time.min(other.ref_time), + proof_size: self.proof_size.min(other.proof_size), + } } /// Get the aggressive max of `self` and `other` weight. pub fn max(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.max(other.ref_time) } + Self { + ref_time: self.ref_time.max(other.ref_time), + proof_size: self.proof_size.max(other.proof_size), + } } /// Try to add some `other` weight while upholding the `limit`. pub fn try_add(&self, other: &Self, limit: &Self) -> Option { let total = self.checked_add(other)?; - if total.ref_time > limit.ref_time { + if total.any_gt(*limit) { None } else { Some(total) } } - /// Construct [`Weight`] with reference time weight. + /// Construct [`Weight`] with reference time weight and 0 storage size weight. pub const fn from_ref_time(ref_time: u64) -> Self { - Self { ref_time } + Self { ref_time, proof_size: 0 } + } + + /// Construct [`Weight`] with storage size weight and 0 reference time weight. + pub const fn from_proof_size(proof_size: u64) -> Self { + Self { ref_time: 0, proof_size } + } + + /// Construct [`Weight`] with weight components, namely reference time and storage size weights. + pub const fn from_components(ref_time: u64, proof_size: u64) -> Self { + Self { ref_time, proof_size } } /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of /// all fields instead of overflowing. pub const fn saturating_add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_add(rhs.ref_time), + proof_size: self.proof_size.saturating_add(rhs.proof_size), + } } /// Saturating [`Weight`] subtraction. Computes `self - rhs`, saturating at the numeric bounds /// of all fields instead of overflowing. pub const fn saturating_sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_sub(rhs.ref_time), + proof_size: self.proof_size.saturating_sub(rhs.proof_size), + } } /// Saturating [`Weight`] scalar multiplication. Computes `self.field * scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. 
pub const fn saturating_mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_mul(scalar) } + Self { + ref_time: self.ref_time.saturating_mul(scalar), + proof_size: self.proof_size.saturating_mul(scalar), + } } /// Saturating [`Weight`] scalar division. Computes `self.field / scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_div(scalar) } + Self { + ref_time: self.ref_time.saturating_div(scalar), + proof_size: self.proof_size.saturating_div(scalar), + } } /// Saturating [`Weight`] scalar exponentiation. Computes `self.field.pow(exp)` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_pow(self, exp: u32) -> Self { - Self { ref_time: self.ref_time.saturating_pow(exp) } + Self { + ref_time: self.ref_time.saturating_pow(exp), + proof_size: self.proof_size.saturating_pow(exp), + } } /// Increment [`Weight`] by `amount` via saturating addition. @@ -122,124 +169,144 @@ impl Weight { /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. pub const fn checked_add(&self, rhs: &Self) -> Option { - match self.ref_time.checked_add(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_add(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_add(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] subtraction. Computes `self - rhs`, returning `None` if overflow /// occurred. pub const fn checked_sub(&self, rhs: &Self) -> Option { - match self.ref_time.checked_sub(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_sub(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_sub(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar multiplication. Computes `self.field * scalar` for each field, /// returning `None` if overflow occurred. pub const fn checked_mul(self, scalar: u64) -> Option { - match self.ref_time.checked_mul(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_mul(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_mul(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar division. Computes `self.field / scalar` for each field, returning /// `None` if overflow occurred. pub const fn checked_div(self, scalar: u64) -> Option { - match self.ref_time.checked_div(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_div(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_div(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Return a [`Weight`] where all fields are zero. pub const fn zero() -> Self { - Self { ref_time: 0 } + Self { ref_time: 0, proof_size: 0 } } /// Constant version of Add with u64. /// /// Is only overflow safe when evaluated at compile-time. 
pub const fn add(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time + scalar } + Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size + scalar } } /// Constant version of Sub with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn sub(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time - scalar } + Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size - scalar } } /// Constant version of Div with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time / scalar } + Self { ref_time: self.ref_time / scalar, proof_size: self.proof_size / scalar } } /// Constant version of Mul with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time * scalar } + Self { ref_time: self.ref_time * scalar, proof_size: self.proof_size * scalar } } /// Returns true if any of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn any_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time || self.proof_size > other.proof_size } /// Returns true if all of `self`'s constituent weights are strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn all_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time && self.proof_size > other.proof_size } /// Returns true if any of `self`'s constituent weights is strictly less than that of the /// `other`'s, otherwise returns false. pub const fn any_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time || self.proof_size < other.proof_size } /// Returns true if all of `self`'s constituent weights are strictly less than that of the /// `other`'s, otherwise returns false. pub const fn all_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time && self.proof_size < other.proof_size } /// Returns true if any of `self`'s constituent weights is greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn any_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time || self.proof_size >= other.proof_size } /// Returns true if all of `self`'s constituent weights are greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time && self.proof_size >= other.proof_size } /// Returns true if any of `self`'s constituent weights is less than or equal to that of the /// `other`'s, otherwise returns false. pub const fn any_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time || self.proof_size <= other.proof_size } /// Returns true if all of `self`'s constituent weights are less than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time && self.proof_size <= other.proof_size } /// Returns true if any of `self`'s constituent weights is equal to that of the `other`'s, /// otherwise returns false.
pub const fn any_eq(self, other: Self) -> bool { - self.ref_time == other.ref_time + self.ref_time == other.ref_time || self.proof_size == other.proof_size } // NOTE: `all_eq` does not exist, as it's simply the `eq` method from the `PartialEq` trait. @@ -258,14 +325,20 @@ impl Zero for Weight { impl Add for Weight { type Output = Self; fn add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time + rhs.ref_time } + Self { + ref_time: self.ref_time + rhs.ref_time, + proof_size: self.proof_size + rhs.proof_size, + } } } impl Sub for Weight { type Output = Self; fn sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time - rhs.ref_time } + Self { + ref_time: self.ref_time - rhs.ref_time, + proof_size: self.proof_size - rhs.proof_size, + } } } @@ -275,7 +348,7 @@ where { type Output = Self; fn mul(self, b: T) -> Self { - Self { ref_time: b * self.ref_time } + Self { ref_time: b * self.ref_time, proof_size: b * self.proof_size } } } @@ -285,7 +358,10 @@ macro_rules! weight_mul_per_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } + Weight { + ref_time: self * b.ref_time, + proof_size: self * b.proof_size, + } } } )* @@ -305,7 +381,10 @@ macro_rules! weight_mul_primitive_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: u64::from(self) * b.ref_time } + Weight { + ref_time: u64::from(self) * b.ref_time, + proof_size: u64::from(self) * b.proof_size, + } } } )* @@ -320,7 +399,7 @@ where { type Output = Self; fn div(self, b: T) -> Self { - Self { ref_time: self.ref_time / b } + Self { ref_time: self.ref_time / b, proof_size: self.proof_size / b } } } @@ -338,7 +417,7 @@ impl CheckedSub for Weight { impl core::fmt::Display for Weight { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Weight(ref_time: {})", self.ref_time) + write!(f, "Weight(ref_time: {}, proof_size: {})", self.ref_time, self.proof_size) } } @@ -353,12 +432,18 @@ impl Bounded for Weight { impl AddAssign for Weight { fn add_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time + other.ref_time }; + *self = Self { + ref_time: self.ref_time + other.ref_time, + proof_size: self.proof_size + other.proof_size, + }; } } impl SubAssign for Weight { fn sub_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time - other.ref_time }; + *self = Self { + ref_time: self.ref_time - other.ref_time, + proof_size: self.proof_size - other.proof_size, + }; } } From 01a905e304f2b6b2c1caf4c12b622edb12b265fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 28 Sep 2022 14:37:03 +0200 Subject: [PATCH 17/75] pallet-utility: Only disallow the `None` origin (#12351) --- frame/utility/src/lib.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 819314f3d8454..9ae89097a9bc3 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -63,7 +63,7 @@ use frame_support::{ }; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; +use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -203,7 +203,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. 
+ if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -319,7 +324,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -426,7 +436,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); From 1b1a5e12c0e391c7ed4e3ffa332eb2fe928d257f Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 28 Sep 2022 14:43:04 +0200 Subject: [PATCH 18/75] Fix staking migration (#12373) Causing issues on Kusama... Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/staking/src/migrations.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 8f37ae30dd056..f2ccb4f8b096f 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -40,10 +40,14 @@ pub mod v12 { "Expected v11 before upgrading to v12" ); - frame_support::ensure!( - T::HistoryDepth::get() == HistoryDepth::::get(), - "Provided value of HistoryDepth should be same as the existing storage value" - ); + if HistoryDepth::::exists() { + frame_support::ensure!( + T::HistoryDepth::get() == HistoryDepth::::get(), + "Provided value of HistoryDepth should be same as the existing storage value" + ); + } else { + log::info!("No HistoryDepth in storage; nothing to remove"); + } Ok(Default::default()) } From 0ec4373d9c1252b60f0a3512fd910b1d48af385a Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 29 Sep 2022 04:38:12 +0900 Subject: [PATCH 19/75] Support running the pallet benchmarks analysis without running the benchmarks (#12361) * Support running the pallet benchmarks analysis without running the benchmarks * Rename `override-results` to `json-input` and update the help comment * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> --- frame/benchmarking/src/utils.rs | 20 ++++-- .../benchmarking-cli/src/pallet/command.rs | 67 +++++++++++++++++-- .../frame/benchmarking-cli/src/pallet/mod.rs | 10 ++- 3 files changed, 82 insertions(+), 15 deletions(-) diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index b483208e3ef69..753e8c1c684ee 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -23,14 +23,14 @@ use frame_support::{ traits::StorageInfo, }; #[cfg(feature = "std")] -use serde::Serialize; +use serde::{Deserialize, Serialize}; use sp_io::hashing::blake2_256; use sp_runtime::traits::TrailingZeroInput; use sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. 
-#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] @@ -71,7 +71,7 @@ impl std::fmt::Display for BenchmarkParameter { } /// The results of a single of benchmark. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { /// The pallet containing this benchmark. @@ -89,7 +89,7 @@ pub struct BenchmarkBatch { // TODO: could probably make API cleaner here. /// The results of a single of benchmark, where time and db results are separated. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatchSplitResults { /// The pallet containing this benchmark. @@ -110,7 +110,7 @@ pub struct BenchmarkBatchSplitResults { /// Result from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] pub struct BenchmarkResult { pub components: Vec<(BenchmarkParameter, u32)>, @@ -121,7 +121,7 @@ pub struct BenchmarkResult { pub writes: u32, pub repeat_writes: u32, pub proof_size: u32, - #[cfg_attr(feature = "std", serde(skip_serializing))] + #[cfg_attr(feature = "std", serde(skip))] pub keys: Vec<(Vec, u32, u32, bool)>, } @@ -141,6 +141,14 @@ mod serde_as_str { let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?; serializer.collect_str(s) } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: serde::de::Deserializer<'de>, + { + let s: &str = serde::de::Deserialize::deserialize(deserializer)?; + Ok(s.into()) + } } /// Possible errors returned from the benchmarking pipeline. diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 6870ec386d23d..72592617c52ac 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -134,6 +134,20 @@ impl PalletCmd { }; } + if let Some(json_input) = &self.json_input { + let raw_data = match std::fs::read(json_input) { + Ok(raw_data) => raw_data, + Err(error) => + return Err(format!("Failed to read {:?}: {}", json_input, error).into()), + }; + let batches: Vec = match serde_json::from_slice(&raw_data) { + Ok(batches) => batches, + Err(error) => + return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), + }; + return self.output_from_results(&batches) + } + let spec = config.chain_spec; let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); let pallet = self.pallet.clone().unwrap_or_default(); @@ -396,8 +410,16 @@ impl PalletCmd { // Combine all of the benchmark results, so that benchmarks of the same pallet/function // are together. 
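As an aside: the `--json-input` flow added earlier in this patch only works because `BenchmarkBatchSplitResults` now derives `Deserialize`. A minimal sketch of reading such a file back, modelled on the command handling above (the error strings and the helper name are illustrative, not part of the patch):

    use frame_benchmarking::BenchmarkBatchSplitResults;

    // Sketch only: load results previously written via `--json` / `--json-file`,
    // so the analysis can run without re-executing any benchmarks.
    fn load_batches(
        path: &std::path::Path,
    ) -> Result<Vec<BenchmarkBatchSplitResults>, String> {
        let raw = std::fs::read(path)
            .map_err(|e| format!("Failed to read {:?}: {}", path, e))?;
        serde_json::from_slice(&raw)
            .map_err(|e| format!("Failed to deserialize {:?}: {}", path, e))
    }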
- let batches: Vec = combine_batches(batches, batches_db); + let batches = combine_batches(batches, batches_db); + self.output(&batches, &storage_info, &component_ranges) + } + fn output( + &self, + batches: &[BenchmarkBatchSplitResults], + storage_info: &[StorageInfo], + component_ranges: &HashMap<(Vec, Vec), Vec>, + ) -> Result<()> { // Jsonify the result and write it to a file or stdout if desired. if !self.jsonify(&batches)? { // Print the summary only if `jsonify` did not write to stdout. @@ -412,10 +434,45 @@ impl PalletCmd { Ok(()) } + fn output_from_results(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<()> { + let mut component_ranges = + HashMap::<(Vec, Vec), HashMap>::new(); + for batch in batches { + let range = component_ranges + .entry((batch.pallet.clone(), batch.benchmark.clone())) + .or_default(); + for result in &batch.time_results { + for (param, value) in &result.components { + let name = param.to_string(); + let (ref mut min, ref mut max) = range.entry(name).or_insert((*value, *value)); + if *value < *min { + *min = *value; + } + if *value > *max { + *max = *value; + } + } + } + } + + let component_ranges: HashMap<_, _> = component_ranges + .into_iter() + .map(|(key, ranges)| { + let ranges = ranges + .into_iter() + .map(|(name, (min, max))| ComponentRange { name, min, max }) + .collect(); + (key, ranges) + }) + .collect(); + + self.output(batches, &[], &component_ranges) + } + /// Jsonifies the passed batches and writes them to stdout or into a file. /// Can be configured via `--json` and `--json-file`. /// Returns whether it wrote to stdout. - fn jsonify(&self, batches: &Vec) -> Result { + fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result { if self.json_output || self.json_file.is_some() { let json = serde_json::to_string_pretty(&batches) .map_err(|e| format!("Serializing into JSON: {:?}", e))?; @@ -432,11 +489,7 @@ impl PalletCmd { } /// Prints the results as human-readable summary without raw timing data. - fn print_summary( - &self, - batches: &Vec, - storage_info: &Vec, - ) { + fn print_summary(&self, batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo]) { for batch in batches.iter() { // Print benchmark metadata println!( diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index b8c1f7b905c0c..0e698c4e73910 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -35,11 +35,11 @@ fn parse_pallet_name(pallet: &str) -> String { #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present = "list")] + #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present_any = ["list", "json-input"])] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[clap(short, long, required_unless_present = "list")] + #[clap(short, long, required_unless_present_any = ["list", "json-input"])] pub extrinsic: Option, /// Select how many samples we should take across the variable components. @@ -166,4 +166,10 @@ pub struct PalletCmd { /// template for that purpose. #[clap(long)] pub no_storage_info: bool, + + /// A path to a `.json` file with existing benchmark results generated with `--json` or + /// `--json-file`. 
When specified the benchmarks are not actually executed, and the data for + /// the analysis is read from this file. + #[clap(long)] + pub json_input: Option, } From d66adfabd7911bf01ab01ec96ec4228307a03e07 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 28 Sep 2022 22:00:33 +0200 Subject: [PATCH 20/75] fix: typo in AllPalletsWithSystem deprecated msg (#12379) --- frame/support/procedural/src/construct_runtime/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index e20cb61b7aec1..73d0d54343eb9 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -422,7 +422,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// Excludes the System pallet. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); } }); @@ -433,7 +433,7 @@ fn decl_all_pallets<'a>( #attr /// All pallets included in the runtime as a nested tuple of types in reversed order. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithSystemReversed = ( #(#names,)* ); } }); @@ -447,7 +447,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// With the system pallet first. #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); } }); From 96de768061b182934b2d824b2fe76effb5b4db85 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:04:14 +0200 Subject: [PATCH 21/75] New Pallet: Root offences (#11943) * root-offences pallet * fix errors * cleaned up a bit * remove unwrap() * new pallet is getting compiled * remove unnecessary type annotations * remove more unnecessary type annotations * addidtional cleaning * commit * cleaned up * fix in logic * add event * removed Clone trait from AccountId * test module * remove unused imports * fmt * fix * separate into functions, still messy * test * first test * fmt * cleaned up a bit * separate into mock.rs and tests.rs * basic docs for now * pallet_staking GenesisiConfig * fix * added start_session * passing tests * impl GenesisConfig for pallet_session * updated event * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * docs * Update frame/root-offences/README.md Co-authored-by: Andronik * Update frame/root-offences/Cargo.toml Co-authored-by: Andronik * license header Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Andronik --- Cargo.lock | 22 ++ Cargo.toml | 1 + frame/nomination-pools/src/lib.rs | 2 +- frame/root-offences/Cargo.toml | 51 +++++ frame/root-offences/README.md | 5 + frame/root-offences/src/lib.rs | 131 +++++++++++ frame/root-offences/src/mock.rs | 356 ++++++++++++++++++++++++++++++ frame/root-offences/src/tests.rs | 94 ++++++++ 8 files changed, 661 insertions(+), 1 deletion(-) create mode 100644 frame/root-offences/Cargo.toml create mode 100644 frame/root-offences/README.md create mode 100644 frame/root-offences/src/lib.rs create mode 100644 frame/root-offences/src/mock.rs create mode 100644 frame/root-offences/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index a9a0eef551179..de50d4ec27105 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6197,6 +6197,28 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-root-offences" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 018355df6c9fd..25f12a2c9fd3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/staking/reward-fn", "frame/state-trie-migration", "frame/sudo", + "frame/root-offences", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 28d10ce573401..9e77adaeee677 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -2523,7 +2523,7 @@ impl Pallet { impl OnStakerSlash> for Pallet { fn on_slash( pool_account: &T::AccountId, - // Bonded 
balance is always read directly from staking, therefore we need not update + // Bonded balance is always read directly from staking, therefore we don't need to update // anything here. slashed_bonded: BalanceOf, slashed_unlocking: &BTreeMap>, diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml new file mode 100644 index 0000000000000..ea6a6527848aa --- /dev/null +++ b/frame/root-offences/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "pallet-root-offences" +version = "1.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME root offences pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } +pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../frame/offences" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } + +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } + +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } + +[features] +runtime-benchmarks = [] +try-runtime = ["frame-support/try-runtime"] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-session/std", + "pallet-staking/std", + "pallet-offences/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/frame/root-offences/README.md b/frame/root-offences/README.md new file mode 100644 index 0000000000000..a2c5261b6985a --- /dev/null +++ b/frame/root-offences/README.md @@ -0,0 +1,5 @@ +# Sudo Offences Pallet + +Pallet that allows the root to create an offence. + +NOTE: This pallet should only be used for testing purposes. \ No newline at end of file diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs new file mode 100644 index 0000000000000..b4b549627f3fa --- /dev/null +++ b/frame/root-offences/src/lib.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Sudo Offences Pallet +//! Pallet that allows the root to create an offence. +//! +//! NOTE: This pallet should be used for testing purposes. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +use pallet_session::historical::IdentificationTuple; +use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; +use sp_runtime::Perbill; +use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: + frame_system::Config + + pallet_staking::Config + + pallet_session::Config::AccountId> + + pallet_session::historical::Config< + FullIdentification = Exposure< + ::AccountId, + BalanceOf, + >, + FullIdentificationOf = ExposureOf, + > + { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An offence was created by root. + OffenceCreated { offenders: Vec<(T::AccountId, Perbill)> }, + } + + #[pallet::error] + pub enum Error { + /// Failed to get the active era from the staking pallet. + FailedToGetActiveEra, + } + + type OffenceDetails = sp_staking::offence::OffenceDetails< + ::AccountId, + IdentificationTuple, + >; + + #[pallet::call] + impl Pallet { + /// Allows the `root`, for example sudo to create an offence. + #[pallet::weight(T::DbWeight::get().reads(2))] + pub fn create_offence( + origin: OriginFor, + offenders: Vec<(T::AccountId, Perbill)>, + ) -> DispatchResult { + ensure_root(origin)?; + + let slash_fraction = + offenders.clone().into_iter().map(|(_, fraction)| fraction).collect::>(); + let offence_details = Self::get_offence_details(offenders.clone())?; + + Self::submit_offence(&offence_details, &slash_fraction); + Self::deposit_event(Event::OffenceCreated { offenders }); + Ok(()) + } + } + + impl Pallet { + /// Returns a vector of offenders that are going to be slashed. + fn get_offence_details( + offenders: Vec<(T::AccountId, Perbill)>, + ) -> Result>, DispatchError> { + let now = Staking::::active_era() + .map(|e| e.index) + .ok_or(Error::::FailedToGetActiveEra)?; + + Ok(offenders + .clone() + .into_iter() + .map(|(o, _)| OffenceDetails:: { + offender: (o.clone(), Staking::::eras_stakers(now, o)), + reporters: vec![], + }) + .collect()) + } + + /// Submits the offence by calling the `on_offence` function. 
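+	/// (All offenders are reported under the current session index with
+	/// `DisableStrategy::WhenSlashed`, mirroring a regular offence report.)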
+ fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { + let session_index = as frame_support::traits::ValidatorSet>::session_index(); + + as OnOffenceHandler< + T::AccountId, + IdentificationTuple, + Weight, + >>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + } + } +} diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs new file mode 100644 index 0000000000000..3f0a26afc1358 --- /dev/null +++ b/frame/root-offences/src/mock.rs @@ -0,0 +1,356 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as root_offences; + +use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, GenesisBuild, Hooks, OneSessionHandler}, +}; +use pallet_staking::StakerStatus; +use sp_core::H256; +use sp_runtime::{ + curve::PiecewiseLinear, + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, IdentityLookup, Zero}, +}; +use sp_staking::{EraIndex, SessionIndex}; +use sp_std::collections::btree_map::BTreeMap; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; +type Balance = u64; +type BlockNumber = u64; + +pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + RootOffences: root_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session::historical::{Pallet, Storage}, + } +); + +/// Another session handler struct to test on_disabled. +pub struct OtherSessionHandler; +impl OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); +} + +pallet_staking_reward_curve::build! { + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000u64, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Test; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); +} + +pub struct OnStakerSlashMock(core::marker::PhantomData); +impl sp_staking::OnStakerSlash for OnStakerSlashMock { + fn on_slash( + _pool_account: &AccountId, + slashed_bonded: Balance, + slashed_chunks: &BTreeMap, + ) { + LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + } +} + +parameter_types! 
{ + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub static Offset: BlockNumber = 0; + pub const Period: BlockNumber = 1; + pub static SessionsPerEra: SessionIndex = 3; + pub static SlashDeferDuration: EraIndex = 0; + pub const BondingDuration: EraIndex = 3; + pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); +} + +impl pallet_staking::Config for Test { + type MaxNominations = ConstU32<16>; + type Currency = Balances; + type CurrencyBalance = ::Balance; + type UnixTime = Timestamp; + type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = Self; + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = ConstU32<64>; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + type ElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type OnStakerSlash = OnStakerSlashMock; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); +} + +impl pallet_session::historical::Config for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! 
{ + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +impl pallet_session::Config for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +pub struct ExtBuilder { + validator_count: u32, + minimum_validator_count: u32, + invulnerables: Vec, + balance_factor: Balance, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + validator_count: 2, + minimum_validator_count: 0, + invulnerables: vec![], + balance_factor: 1, + } + } +} + +impl ExtBuilder { + fn build(self) -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + //controllers + (10, self.balance_factor * 50), + (20, self.balance_factor * 50), + (30, self.balance_factor * 50), + (40, self.balance_factor * 50), + // stashes + (11, self.balance_factor * 1000), + (21, self.balance_factor * 1000), + (31, self.balance_factor * 500), + (41, self.balance_factor * 1000), + ], + } + .assimilate_storage(&mut storage) + .unwrap(); + + let stakers = vec![ + // (stash, ctrl, stake, status) + // these two will be elected in the default test where we elect 2. + (11, 10, 1000, StakerStatus::::Validator), + (21, 20, 1000, StakerStatus::::Validator), + // a loser validator + (31, 30, 500, StakerStatus::::Validator), + // an idle validator + (41, 40, 1000, StakerStatus::::Idle), + ]; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + ..Default::default() + }; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + validator_count: self.validator_count, + minimum_validator_count: self.minimum_validator_count, + invulnerables: self.invulnerables, + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let _ = pallet_session::GenesisConfig:: { + keys: stakers + .into_iter() + .map(|(id, ..)| (id, id, SessionKeys { other: id.into() })) + .collect(), + } + .assimilate_storage(&mut storage); + + storage.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = self.build(); + ext.execute_with(test); + } +} + +/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. +pub(crate) fn start_session(session_index: SessionIndex) { + let end: u64 = if Offset::get().is_zero() { + (session_index as u64) * Period::get() + } else { + Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() + }; + run_to_block(end); + // session must have progressed properly. + assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Progress to the given block, triggering session and era changes as we progress. 
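+/// (Each simulated block also advances the mock timestamp by `BLOCK_TIME`.)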
+/// +/// This will finalize the previous block, initialize up to the given block, essentially simulating +/// a block import/propose process where we first initialize the block, then execute some stuff (not +/// in the function), and then finalize the block. +pub(crate) fn run_to_block(n: BlockNumber) { + Staking::on_finalize(System::block_number()); + for b in (System::block_number() + 1)..=n { + System::set_block_number(b); + Session::on_initialize(b); + >::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); + if b != n { + Staking::on_finalize(System::block_number()); + } + } +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} diff --git a/frame/root-offences/src/tests.rs b/frame/root-offences/src/tests.rs new file mode 100644 index 0000000000000..a8b7d0a6d6aca --- /dev/null +++ b/frame/root-offences/src/tests.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use frame_support::{assert_err, assert_ok}; +use mock::{active_era, start_session, Balances, ExtBuilder, RootOffences, RuntimeOrigin, System}; + +#[test] +fn create_offence_fails_given_signed_origin() { + use sp_runtime::traits::BadOrigin; + ExtBuilder::default().build_and_execute(|| { + let offenders = (&[]).to_vec(); + assert_err!(RootOffences::create_offence(RuntimeOrigin::signed(1), offenders), BadOrigin); + }) +} + +#[test] +fn create_offence_works_given_root_origin() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + assert_eq!(Balances::free_balance(11), 1000); + + let offenders = [(11, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + // the slash should be applied right away. + assert_eq!(Balances::free_balance(11), 500); + + // the other validator should keep his balance, because we only created + // an offences for the first validator. + assert_eq!(Balances::free_balance(21), 1000); + }) +} + +#[test] +fn create_offence_wont_slash_non_active_validators() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 31 is not an active validator. + assert_eq!(Balances::free_balance(31), 500); + + let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // so 31 didn't get slashed. + assert_eq!(Balances::free_balance(31), 500); + + // but 11 is an active validator so he got slashed. 
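+		// (20% of 11's 1000 stake = 200 slashed, leaving 800.)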
+ assert_eq!(Balances::free_balance(11), 800); + }) +} + +#[test] +fn create_offence_wont_slash_idle() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 41 is idle. + assert_eq!(Balances::free_balance(41), 1000); + + let offenders = [(41, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // 41 didn't get slashed. + assert_eq!(Balances::free_balance(41), 1000); + }) +} From e7f994d1e797420f252dd24714b029071ccbc46c Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:52:16 +0200 Subject: [PATCH 22/75] bounding staking: `BoundedElectionProvider` trait (#12362) * add a bounded election provider trait * extract common trait election provider base * fmt * only bound the outer support vector * fix tests * docs * fix rust docs * fmt * fix rustdocs * docs * improve docs * small doc change --- .../election-provider-multi-phase/src/lib.rs | 16 ++-- .../election-provider-multi-phase/src/mock.rs | 5 +- frame/election-provider-support/src/lib.rs | 83 ++++++++++++------- .../election-provider-support/src/onchain.rs | 27 +++--- frame/fast-unstake/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- primitives/npos-elections/src/lib.rs | 14 ++-- 7 files changed, 99 insertions(+), 55 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bba8139f38f44..649aec30c58b3 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -231,7 +231,8 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, + ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolution, }; use frame_support::{ dispatch::DispatchClass, @@ -289,7 +290,7 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; /// The fallback election type. -pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -312,7 +313,7 @@ pub trait BenchmarkingConfig { /// A fallback implementation that transitions the pallet to the emergency phase. pub struct NoFallback(sp_std::marker::PhantomData); -impl ElectionProvider for NoFallback { +impl ElectionProviderBase for NoFallback { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type DataProvider = T::DataProvider; @@ -321,7 +322,9 @@ impl ElectionProvider for NoFallback { fn ongoing() -> bool { false } +} +impl ElectionProvider for NoFallback { fn elect() -> Result, Self::Error> { // Do nothing, this will enable the emergency phase. 
Err("NoFallback.") @@ -1563,7 +1566,7 @@ impl Pallet { >::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - T::Fallback::elect() + ::elect() .map(|supports| ReadySolution { supports, score: Default::default(), @@ -1598,7 +1601,7 @@ impl Pallet { } } -impl ElectionProvider for Pallet { +impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type Error = ElectionError; @@ -1610,7 +1613,9 @@ impl ElectionProvider for Pallet { _ => true, } } +} +impl ElectionProvider for Pallet { fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { @@ -1627,7 +1632,6 @@ impl ElectionProvider for Pallet { } } } - /// convert a DispatchError to a custom InvalidTransaction with the inner code being the error /// number. pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 34aa2e1bbfc58..c1c53a3980676 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -297,7 +297,7 @@ impl onchain::Config for OnChainSeqPhragmen { } pub struct MockFallback; -impl ElectionProvider for MockFallback { +impl ElectionProviderBase for MockFallback { type AccountId = AccountId; type BlockNumber = u64; type Error = &'static str; @@ -306,7 +306,8 @@ impl ElectionProvider for MockFallback { fn ongoing() -> bool { false } - +} +impl ElectionProvider for MockFallback { fn elect() -> Result, Self::Error> { Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 0bf62bd8c35cd..5ee65e102bd06 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -20,10 +20,11 @@ //! This crate provides two traits that could interact to enable extensible election functionality //! within FRAME pallets. //! -//! Something that will provide the functionality of election will implement [`ElectionProvider`], -//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by -//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver -//! of the election, resulting in a diagram as below: +//! Something that will provide the functionality of election will implement +//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an +//! associated [`ElectionProviderBase::DataProvider`], which needs to be +//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* +//! the receiver of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -131,12 +132,16 @@ //! type DataProvider: ElectionDataProvider; //! } //! -//! impl ElectionProvider for GenericElectionProvider { +//! impl ElectionProviderBase for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; //! fn ongoing() -> bool { false } +//! +//! } +//! +//! impl ElectionProvider for GenericElectionProvider { //! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! 
.map_err(|_| "failed to elect") @@ -177,8 +182,8 @@ pub use frame_support::{traits::Get, weights::Weight, BoundedVec, RuntimeDebug}; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, - Support, Supports, VoteWeight, + Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, + IdentifierT, PerThing128, Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -349,12 +354,12 @@ pub trait ElectionDataProvider { fn clear() {} } -/// Something that can compute the result of an election and pass it back to the caller. +/// Base trait for [`ElectionProvider`] and [`BoundedElectionProvider`]. It is +/// meant to be used only with an extension trait that adds an election +/// functionality. /// -/// This trait only provides an interface to _request_ an election, i.e. -/// [`ElectionProvider::elect`]. That data required for the election need to be passed to the -/// implemented of this trait through [`ElectionProvider::DataProvider`]. -pub trait ElectionProvider { +/// Data can be bounded or unbounded and is fetched from [`Self::DataProvider`]. +pub trait ElectionProviderBase { /// The account identifier type. type AccountId; @@ -372,24 +377,39 @@ pub trait ElectionProvider { /// Indicate if this election provider is currently ongoing an asynchronous election or not. fn ongoing() -> bool; +} - /// Elect a new set of winners, without specifying any bounds on the amount of data fetched from - /// [`Self::DataProvider`]. An implementation could nonetheless impose its own custom limits. - /// - /// The result is returned in a target major format, namely as *vector of supports*. - /// - /// This should be implemented as a self-weighing function. The implementor should register its - /// appropriate weight at the end of execution with the system pallet directly. +/// Elect a new set of winners, bounded by `MaxWinners`. +/// +/// Returns a result in bounded, target major format, namely as +/// *BoundedVec<(AccountId, Vec), MaxWinners>*. +pub trait BoundedElectionProvider: ElectionProviderBase { + /// The upper bound on election winners. + type MaxWinners: Get; + /// Performs the election. This should be implemented as a self-weighing function. The + /// implementor should register its appropriate weight at the end of execution with the + /// system pallet directly. + fn elect() -> Result, Self::Error>; +} + +/// Same a [`BoundedElectionProvider`], but no bounds are imposed on the number +/// of winners. +/// +/// The result is returned in a target major format, namely as +///*Vec<(AccountId, Vec)>*. +pub trait ElectionProvider: ElectionProviderBase { + /// Performs the election. This should be implemented as a self-weighing + /// function, similar to [`BoundedElectionProvider::elect()`]. fn elect() -> Result, Self::Error>; } -/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure an election needs to -/// happen instantly, not asynchronously. +/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure +/// an election needs to happen instantly, not asynchronously. /// /// The same `DataProvider` is assumed to be used. /// -/// Consequently, allows for control over the amount of data that is being fetched from the -/// [`ElectionProvider::DataProvider`]. 
+/// Consequently, allows for control over the amount of data that is being +/// fetched from the [`ElectionProviderBase::DataProvider`]. pub trait InstantElectionProvider: ElectionProvider { /// Elect a new set of winners, but unlike [`ElectionProvider::elect`] which cannot enforce /// bounds, this trait method can enforce bounds on the amount of data provided by the @@ -410,7 +430,7 @@ pub trait InstantElectionProvider: ElectionProvider { pub struct NoElection(sp_std::marker::PhantomData); #[cfg(feature = "std")] -impl ElectionProvider +impl ElectionProviderBase for NoElection<(AccountId, BlockNumber, DataProvider)> where DataProvider: ElectionDataProvider, @@ -420,15 +440,22 @@ where type Error = &'static str; type DataProvider = DataProvider; - fn elect() -> Result, Self::Error> { - Err(" cannot do anything.") - } - fn ongoing() -> bool { false } } +#[cfg(feature = "std")] +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider)> +where + DataProvider: ElectionDataProvider, +{ + fn elect() -> Result, Self::Error> { + Err(" cannot do anything.") + } +} + /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. /// /// This is generic over `AccountId` and it can represent a validator, a nominator, or any other diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 10c3519d03df6..88aa6ca7267a0 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,7 +20,8 @@ //! careful when using it onchain. use crate::{ - Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolver, WeightInfo, + Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolver, WeightInfo, }; use frame_support::{dispatch::DispatchClass, traits::Get}; use sp_npos_elections::*; @@ -133,15 +134,6 @@ fn elect_with( } impl ElectionProvider for UnboundedExecution { - type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; - type Error = Error; - type DataProvider = T::DataProvider; - - fn ongoing() -> bool { - false - } - fn elect() -> Result, Self::Error> { // This should not be called if not in `std` mode (and therefore neither in genesis nor in // testing) @@ -156,6 +148,17 @@ impl ElectionProvider for UnboundedExecution { } } +impl ElectionProviderBase for UnboundedExecution { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Error = Error; + type DataProvider = T::DataProvider; + + fn ongoing() -> bool { + false + } +} + impl InstantElectionProvider for UnboundedExecution { fn elect_with_bounds( max_voters: usize, @@ -165,7 +168,7 @@ impl InstantElectionProvider for UnboundedExecution { } } -impl ElectionProvider for BoundedExecution { +impl ElectionProviderBase for BoundedExecution { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; type Error = Error; @@ -174,7 +177,9 @@ impl ElectionProvider for BoundedExecution { fn ongoing() -> bool { false } +} +impl ElectionProvider for BoundedExecution { fn elect() -> Result, Self::Error> { elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) } diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index ed26d6b436e1d..8fdb7a79dd537 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -80,7 +80,7 @@ macro_rules! 
log { pub mod pallet { use super::*; use crate::types::*; - use frame_election_provider_support::ElectionProvider; + use frame_election_provider_support::ElectionProviderBase; use frame_support::{ pallet_prelude::*, traits::{Defensive, ReservableCurrency}, @@ -330,7 +330,8 @@ pub mod pallet { } } - if ::ElectionProvider::ongoing() { + if <::ElectionProvider as ElectionProviderBase>::ongoing() + { // NOTE: we assume `ongoing` does not consume any weight. // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 4c4c5f9ff26fd..dc2c694d52956 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -104,7 +104,7 @@ parameter_types! { } pub struct MockElection; -impl frame_election_provider_support::ElectionProvider for MockElection { +impl frame_election_provider_support::ElectionProviderBase for MockElection { type AccountId = AccountId; type BlockNumber = BlockNumber; type DataProvider = Staking; @@ -113,7 +113,9 @@ impl frame_election_provider_support::ElectionProvider for MockElection { fn ongoing() -> bool { Ongoing::get() } +} +impl frame_election_provider_support::ElectionProvider for MockElection { fn elect() -> Result, Self::Error> { Err(()) } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index dd2a9bf198f8d..514ded67ad38b 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -74,17 +74,16 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::RuntimeDebug; +use sp_core::{bounded::BoundedVec, RuntimeDebug}; use sp_std::{ cell::RefCell, cmp::Ordering, collections::btree_map::BTreeMap, prelude::*, rc::Rc, vec, }; -use codec::{Decode, Encode, MaxEncodedLen}; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - #[cfg(test)] mod mock; #[cfg(test)] @@ -451,6 +450,11 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; +/// Same as `Supports` bounded by `MaxWinners`. +/// +/// To note, the inner `Support` is still unbounded. +pub type BoundedSupports = BoundedVec<(A, Support), MaxWinners>; + /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. From 427fd09bcb193c1e79dec85b1e207c718b686c35 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 29 Sep 2022 09:28:22 +0300 Subject: [PATCH 23/75] BEEFY: impl TypeInfo for SignedCommitment (#12382) --- primitives/beefy/src/commitment.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 4880d4b69ab01..0e22c8d56d937 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -16,6 +16,7 @@ // limitations under the License. use codec::{Decode, Encode, Error, Input}; +use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; use crate::ValidatorSetId; @@ -39,7 +40,7 @@ pub mod known_payload_ids { /// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected /// value. 
Duplicated identifiers are disallowed. It's okay for different implementations to only /// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash)] +#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] pub struct Payload(Vec<(BeefyPayloadId, Vec)>); impl Payload { @@ -80,7 +81,7 @@ impl Payload { /// height [block_number](Commitment::block_number). /// GRANDPA validators collect signatures on commitments and a stream of such signed commitments /// (see [SignedCommitment]) forms the BEEFY protocol. -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)] pub struct Commitment { /// A collection of payloads to be signed, see [`Payload`] for details. /// @@ -138,7 +139,7 @@ where /// Note that SCALE-encoding of the structure is optimized for size efficiency over the wire, /// please take a look at custom [`Encode`] and [`Decode`] implementations and /// `CompactSignedCommitment` struct. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, TypeInfo)] pub struct SignedCommitment { /// The commitment signatures are collected for. pub commitment: Commitment, From 61b9a4d1a8a9bf39c1d89a8dd02f82785c10860c Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 29 Sep 2022 23:48:10 +0800 Subject: [PATCH 24/75] Properly set the max proof size weight on defaults and tests (#12383) * Properly set the max proof size weight on defaults and tests * cargo fmt * Set proper max proof size for contracts pallet tests * Properly set max proof size for node * Properly set max proof size for frame system mock * Update test expectations * Update test expectations * Properly set max proof size for balances mock * Update test expectations * Update test expectations * Properly set max proof size for democracy mock * Properly set max proof size for scheduler mock * Properly set max proof size for fast unstake mock * Properly set max proof size for tx payment mock * Properly set max proof size for elections phragmen mock * Properly set max proof size for node template --- bin/node-template/runtime/src/lib.rs | 7 +- bin/node/runtime/src/impls.rs | 2 +- bin/node/runtime/src/lib.rs | 4 +- frame/balances/src/tests_composite.rs | 4 +- frame/balances/src/tests_local.rs | 4 +- frame/balances/src/tests_reentrancy.rs | 4 +- frame/contracts/src/tests.rs | 14 +- frame/democracy/src/tests.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../election-provider-multi-phase/src/mock.rs | 7 +- .../src/signed.rs | 9 +- .../src/unsigned.rs | 292 +++++++++++++++--- frame/elections-phragmen/src/lib.rs | 4 +- frame/executive/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- frame/grandpa/src/tests.rs | 3 +- frame/scheduler/src/mock.rs | 4 +- frame/system/src/extensions/check_weight.rs | 20 +- frame/system/src/limits.rs | 24 +- frame/system/src/mock.rs | 2 +- .../asset-tx-payment/src/tests.rs | 2 +- frame/transaction-payment/src/lib.rs | 2 +- 22 files changed, 323 insertions(+), 107 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f801068b10fda..1d0e18d31bf80 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -139,8 +139,11 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. 
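A quick sketch of the two-dimensional construction used in the runtimes below (names as in `frame_support::weights`; the `ref_time()`/`proof_size()` getters are assumed to exist alongside `set_proof_size`):

    use frame_support::weights::{constants::WEIGHT_PER_SECOND, Weight};

    fn main() {
        // Sketch: 2s of compute, with the proof-size axis left effectively unbounded.
        let max_block: Weight = (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX);
        assert_eq!(max_block.ref_time(), 2 * WEIGHT_PER_SECOND.ref_time());
        assert_eq!(max_block.proof_size(), u64::MAX);
    }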
- pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + NORMAL_DISPATCH_RATIO, + ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index fb2f3cec65290..0f9ed6e275196 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -224,7 +224,7 @@ mod multiplier_tests { fn multiplier_can_grow_from_zero() { // if the min is too small, then this will not change, and we are doomed forever. // the weight is 1/100th bigger than target. - run_with_system_weight(target() * 101 / 100, || { + run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || { let next = runtime_multiplier_update(min_multiplier()); assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); }) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index aa1a525bf095c..5e4fdb4748d15 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -170,8 +170,8 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2); +/// We allow for 2 seconds of compute with a 6 second average block time, with maximum proof size. +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2).set_proof_size(u64::MAX); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 1e38d611773d4..f8a8fdd1851d4 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -47,7 +47,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index e080eafb66067..152a5da37410f 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -48,7 +48,9 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index fa2eb0e488e7d..90363140000e8 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,7 +51,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a56e4f5564845..e5893c3dbd112 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -279,7 +279,9 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 1; } impl frame_system::Config for Test { @@ -413,7 +415,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); pub struct ExtBuilder { existential_deposit: u64, @@ -628,7 +630,7 @@ fn deposit_event_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer, + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, None, ::Schedule::get().limits.payload_len.encode(), )); @@ -769,7 +771,7 @@ fn storage_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer None, ::Schedule::get().limits.payload_len.encode(), )); @@ -2543,7 +2545,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2613,7 +2615,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), None, call.encode(), false, diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 17b35ee3c38cd..03d7216fd5aaa 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -78,7 +78,9 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(1_000_000).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 649aec30c58b3..fb17bd25ea541 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1011,10 +1011,8 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; - // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).ref_time() < - T::SignedMaxWeight::get().ref_time(), + Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), Error::::SignedTooMuchWeight, ); @@ -2342,9 +2340,8 @@ mod tests { }; let mut active = 1; - // TODO: account for proof size weight - while weight_with(active).ref_time() <= - ::BlockWeights::get().max_block.ref_time() || + while weight_with(active) + .all_lte(::BlockWeights::get().max_block) || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index c1c53a3980676..d3082be0cf750 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -26,7 +26,7 @@ pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; use frame_support::{ bounded_vec, parameter_types, traits::{ConstU32, Hooks}, - weights::Weight, + weights::{constants, Weight}, BoundedVec, }; use multi_phase::unsigned::{IndexAssignmentOf, VoterOf}; @@ -227,7 +227,10 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! 
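// A minimal sketch (illustration only, not part of the patch, assuming
// `frame_support::weights::{constants::WEIGHT_PER_SECOND, Weight}`):
// `from_components` fills both dimensions at once, so the construction in the
// mock below is equivalent to the `set_proof_size` pattern used elsewhere in
// this series:
//
//     let a = Weight::from_components(2u64 * WEIGHT_PER_SECOND.ref_time(), u64::MAX);
//     let b = (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX);
//     // equal, since WEIGHT_PER_SECOND itself carries no proof_size component
//     assert_eq!(a, b);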
{ pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults( + Weight::from_components(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + NORMAL_DISPATCH_RATIO, + ); } impl pallet_balances::Config for Runtime { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 1cf071e6796f1..2e01d99be0a42 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -957,7 +957,7 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(Weight::from_ref_time(40)) + .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(15); @@ -973,11 +973,14 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); + assert_eq!( + ::SignedMaxWeight::get(), + Weight::from_ref_time(40).set_proof_size(u64::MAX) + ); assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw.clone()))); - ::set(Weight::from_ref_time(30)); + ::set(Weight::from_ref_time(30).set_proof_size(u64::MAX)); // note: resubmitting the same solution is technically okay as long as the queue has // space. diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 281ac37421174..025ff832bb08a 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,8 +638,7 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - // TODO: account for proof size weight - if current_weight.ref_time() < max_weight.ref_time() { + if current_weight.all_lt(max_weight) { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -674,8 +673,7 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. 
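// A self-contained sketch (illustration only, not part of the patch) of the
// comparison semantics the rewritten bounds below rely on; the real methods
// live on `frame_support::weights::Weight`:
#[derive(Clone, Copy)]
struct W { ref_time: u64, proof_size: u64 }
impl W {
    // `all_lt`: strictly smaller in *every* dimension.
    fn all_lt(self, o: W) -> bool { self.ref_time < o.ref_time && self.proof_size < o.proof_size }
    // `any_gt`: larger in *at least one* dimension.
    fn any_gt(self, o: W) -> bool { self.ref_time > o.ref_time || self.proof_size > o.proof_size }
}
fn comparison_semantics() {
    let limit = W { ref_time: 100, proof_size: 10 };
    let candidate = W { ref_time: 50, proof_size: 10 };
    // A ref_time-only check would admit `candidate`; `all_lt` does not,
    // because its proof_size is not strictly below the limit...
    assert!(!candidate.all_lt(limit));
    // ...yet it does not exceed the limit in any dimension either.
    assert!(!candidate.any_gt(limit));
}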
- // TODO: account for proof size weight - while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { + while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -683,9 +681,8 @@ impl Miner { } let final_decision = voters.min(size.voters); - // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).ref_time() <= max_weight.ref_time(), + weight_with(final_decision).all_lte(max_weight), "weight_with({}) <= {}", final_decision, max_weight, @@ -703,151 +700,346 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::zero()), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::zero().set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2990).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2999).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3000).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + 
Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(5500)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(5500).set_proof_size(u64::MAX) + ), 5 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(7777)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(7777).set_proof_size(u64::MAX) + ), 7 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(9999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(9999).set_proof_size(u64::MAX) + ), 9 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_999).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(11_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(11_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(22_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(22_000).set_proof_size(u64::MAX) + ), 10 ); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - 
Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 1 ); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 2 ); } @@ -1131,7 +1323,7 @@ mod tests { #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(Weight::from_ref_time(100)) + .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(25); @@ -1149,7 +1341,7 @@ mod tests { assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(Weight::from_ref_time(25)); + ::set(Weight::from_ref_time(25).set_proof_size(u64::MAX)); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 0616087d975e8..165a8fcab429b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1174,7 +1174,9 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 014c7a2bc02a6..b7884efccf685 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,8 +459,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - // TODO: account for proof size weight - if remaining_weight.ref_time() > 0 { + if remaining_weight.all_gt(Weight::zero()) { let used_weight = >::on_idle( block_number, remaining_weight, @@ -768,7 +767,7 @@ mod tests { frame_system::limits::BlockWeights::builder() .base_block(Weight::from_ref_time(10)) .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).into()) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index dc2c694d52956..71fc2d4ba905a 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -32,7 +32,9 @@ pub type T = Runtime; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Runtime { diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 5d2ebdf29cb6b..626decd12821e 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,8 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.weight.any_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6f6667590a6c3..6aaad13e48183 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -118,7 +118,9 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(2_000_000_000_000).set_proof_size(u64::MAX), + ); } impl system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 15a88913cd337..5c3b80f59bfa8 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -310,7 +310,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -367,7 +367,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); } @@ -392,8 +392,8 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. 
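// A minimal sketch (illustration only, not part of the patch) of why the
// assertions in this test normalise proof_size: the limit is saturated in that
// dimension while the accrued test weights are ref_time-only. Assuming
// `frame_support::weights::Weight`:
fn normalised_comparison() {
    use frame_support::weights::Weight;
    let limit = Weight::from_ref_time(1024).set_proof_size(u64::MAX);
    let accrued = Weight::from_ref_time(1024); // proof_size defaults to zero
    assert_ne!(limit, accrued);
    assert_eq!(limit.set_proof_size(0), accrued);
}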
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -417,8 +417,8 @@ mod tests { // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); } @@ -669,7 +669,7 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); @@ -682,11 +682,11 @@ mod tests { .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_ref_time(20)); + w.max_total = Some(Weight::from_ref_time(20).set_proof_size(u64::MAX)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_ref_time(5)); + w.reserved = Some(Weight::from_ref_time(5).set_proof_size(u64::MAX)); w.max_total = None; }) .build_or_panic(); @@ -695,7 +695,7 @@ mod tests { DispatchClass::Operational => Weight::from_ref_time(10), DispatchClass::Mandatory => Weight::zero(), }); - assert_eq!(maximum_weight.max_block, all_weight.total()); + assert_eq!(maximum_weight.max_block, all_weight.total().set_proof_size(u64::MAX)); // fits into reserved let mandatory1 = DispatchInfo { diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index cfc1d261baa01..07ad240afe159 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,10 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults( + Weight::from_components(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + DEFAULT_NORMAL_RATIO, + ) } } @@ -224,7 +227,6 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); - // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -233,16 +235,18 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) - || max_for_class.ref_time() == 0, + (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) + || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. 
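// A short sketch (illustration only, not part of the patch) of what the
// per-dimension `all_lte` in the following `error_assert!` catches that the old
// ref_time-only comparison missed. Assuming `frame_support::weights::Weight`:
fn per_dimension_check() {
    use frame_support::weights::Weight;
    let max_for_class = Weight::from_components(1_000, 100);
    let max_extrinsic = Weight::from_components(500, 200);
    // accepted by the old one-dimensional check:
    assert!(max_extrinsic.ref_time() <= max_for_class.ref_time());
    // rejected per dimension, since proof_size exceeds the class limit:
    assert!(!max_extrinsic.all_lte(max_for_class));
}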
error_assert!( - weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= - max_for_class.saturating_sub(base_for_class).ref_time(), + weights + .max_extrinsic + .unwrap_or(Weight::zero()) + .all_lte(max_for_class.saturating_sub(base_for_class)), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -251,14 +255,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, + weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, + reserved.all_gt(base_for_class) || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -267,7 +271,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), + self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -276,7 +280,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. error_assert!( - self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), + self.max_block.all_gt(base_for_class + self.base_block), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index b6fc121612050..d31a1b08667e5 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -41,7 +41,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024); +const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024).set_proof_size(u64::MAX); parameter_types! 
{ pub Version: RuntimeVersion = RuntimeVersion { diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index cdf7d17898145..e775f3aa92990 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -71,7 +71,7 @@ impl Get for BlockWeights { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 1ad6a2b3b3b6f..80297d1a0d362 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -889,7 +889,7 @@ mod tests { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } From c2026ca6e9b2a24d8ae1a05c5b3784ffa0748946 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 30 Sep 2022 11:14:13 +0800 Subject: [PATCH 25/75] Carry over where clauses defined in Config to Call and Hook (#12388) --- frame/support/procedural/src/pallet/expand/call.rs | 2 +- frame/support/procedural/src/pallet/expand/hooks.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 39d16109aa8fa..6b166e6726d38 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -32,7 +32,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { (span, where_clause, methods, docs) }, - None => (def.item.span(), None, Vec::new(), Vec::new()), + None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; let frame_system = &def.frame_system; diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 48d4aec436d40..d8d009cf3c940 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -26,7 +26,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let has_runtime_upgrade = hooks.has_runtime_upgrade; (where_clause, span, has_runtime_upgrade) }, - None => (None, def.pallet_struct.attr_span, false), + None => (def.config.where_clause.clone(), def.pallet_struct.attr_span, false), }; let frame_support = &def.frame_support; From dbb72f3fd98253b72c0090375b738b9d00995090 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 30 Sep 2022 12:06:46 +0200 Subject: [PATCH 26/75] unsafe_pruning flag removed (#12385) --- client/cli/src/config.rs | 11 ----------- client/cli/src/params/import_params.rs | 12 ------------ 2 files changed, 23 deletions(-) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index fad2ec7bc4a93..77689708a231f 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -659,17 +659,6 @@ pub trait CliConfiguration: Sized { } } - if self.import_params().map_or(false, |p| { - #[allow(deprecated)] - p.unsafe_pruning - }) { - 
// according to https://github.com/substrate/issues/8103; - warn!( - "WARNING: \"--unsafe-pruning\" CLI-flag is deprecated and has no effect. \ - In future builds it will be removed, and providing this flag will lead to an error." - ); - } - Ok(()) } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index c851050838965..3cd9fd83bd31b 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -41,18 +41,6 @@ pub struct ImportParams { #[clap(flatten)] pub database_params: DatabaseParams, - /// THIS IS A DEPRECATED CLI-ARGUMENT. - /// - /// It has been preserved in order to not break the compatibility with the existing scripts. - /// Enabling this option will lead to a runtime warning. - /// In future this option will be removed completely, thus specifying it will lead to a start - /// up error. - /// - /// Details: - #[clap(long)] - #[deprecated = "According to https://github.com/paritytech/substrate/issues/8103"] - pub unsafe_pruning: bool, - /// Method for executing Wasm runtime code. #[clap( long = "wasm-execution", From 952030cfa6f11be6aef938e5359064c4cf6b30a9 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 30 Sep 2022 13:46:48 +0300 Subject: [PATCH 27/75] pallet-mmr: generate historical proofs (#12324) * BEEFY: generate historical proofs Signed-off-by: Serban Iorga * Update frame/merkle-mountain-range/rpc/src/lib.rs Co-authored-by: Adrian Catangiu * Update primitives/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * cargo fmt * fix off-by-one in leaves powerset generation * test all possible mmr sizes for historical proofs * remove now redundant simple_historical_proof * cargo fmt Signed-off-by: Serban Iorga Co-authored-by: Adrian Catangiu Co-authored-by: Robert Hambrock --- bin/node/runtime/src/lib.rs | 39 ++- client/beefy/src/tests.rs | 7 + frame/merkle-mountain-range/rpc/src/lib.rs | 49 ++++ frame/merkle-mountain-range/src/lib.rs | 22 +- frame/merkle-mountain-range/src/tests.rs | 276 +++++++++++++++++++- primitives/merkle-mountain-range/src/lib.rs | 11 +- 6 files changed, 379 insertions(+), 25 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5e4fdb4748d15..4fa4049e22682 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2011,10 +2011,7 @@ impl_runtime_apis! { } } - impl pallet_mmr::primitives::MmrApi< - Block, - mmr::Hash, - > for Runtime { + impl pallet_mmr::primitives::MmrApi for Runtime { fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { @@ -2049,11 +2046,35 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Mmr::generate_batch_proof(leaf_indices) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) + fn generate_batch_proof( + leaf_indices: Vec, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_batch_proof(leaf_indices).map(|(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }) + } + + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: pallet_mmr::primitives::LeafIndex, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + |(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }, + ) } fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 26c85592ecb85..3e49f4e05cc91 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -277,6 +277,13 @@ macro_rules! create_test_api { unimplemented!() } + fn generate_historical_batch_proof( + _leaf_indices: Vec, + _leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), MmrError> { + unimplemented!() + } + fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { unimplemented!() } diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 75032d40f492a..e939ff8ae7cd0 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -128,6 +128,31 @@ pub trait MmrApi { leaf_indices: Vec, at: Option, ) -> RpcResult>; + + /// Generate a MMR proof for the given `leaf_indices` of the MMR that had `leaves_count` leaves. + /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// a MMR proof for the set of leaves at the given `leaf_indices` with MMR fixed to the state + /// with exactly `leaves_count` leaves. `leaves_count` must be larger than all `leaf_indices` + /// for the function to succeed. + /// + /// Optionally, a block hash at which the runtime should be queried can be specified. + /// Note that specifying the block hash isn't super-useful here, unless you're generating + /// proof using non-finalized blocks where there are several competing forks. That's because + /// MMR state will be fixed to the state with `leaves_count`, which already points to some + /// historical block. + /// + /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// the leaves). Both parameters are SCALE-encoded. + /// The order of entries in the `leaves` field of the returned struct + /// is the same as the order of the entries in `leaf_indices` supplied + #[method(name = "mmr_generateHistoricalBatchProof")] + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option, + ) -> RpcResult>; } /// MMR RPC methods. @@ -192,6 +217,30 @@ where Ok(LeafBatchProof::new(block_hash, leaves, proof)) } + + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option<::Hash>, + ) -> RpcResult::Hash>> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. 
+ self.client.info().best_hash); + + let (leaves, proof) = api + .generate_historical_batch_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_indices, + leaves_count, + ) + .map_err(runtime_error_into_rpc_error)? + .map_err(mmr_error_into_rpc_error)?; + + Ok(LeafBatchProof::new(block_hash, leaves, proof)) + } } /// Converts a mmr-specific error into a [`CallError`]. diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 9f989847af0f9..8b4f2b60bc198 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -330,7 +330,27 @@ impl, I: 'static> Pallet { (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + Self::generate_historical_batch_proof(leaf_indices, Self::mmr_leaves()) + } + + /// Generate a MMR proof for the given `leaf_indices` for the MMR of `leaves_count` size. + /// + /// Note this method can only be used from an off-chain context + /// (Offchain Worker or Runtime API call), since it requires + /// all the leaves to be present. + /// It may return an error or panic if used incorrectly. + pub fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex, + ) -> Result< + (Vec>, primitives::BatchProof<>::Hash>), + primitives::Error, + > { + if leaves_count > Self::mmr_leaves() { + return Err(Error::InvalidLeavesCount) + } + + let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); mmr.generate_batch_proof(leaf_indices) } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index d6886f90a5da7..bcb775ba02819 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -227,7 +227,8 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| add_blocks(7)); + let num_blocks: u64 = 7; + ext.execute_with(|| add_blocks(num_blocks as usize)); ext.persist_offchain_overlay(); // Try to generate proofs now. 
This requires the offchain extensions to be present @@ -241,6 +242,23 @@ fn should_generate_proofs_correctly() { crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() }) .collect::>(); + // when generate historical proofs for all leaves + let historical_proofs = (0_u64..crate::NumberOfLeaves::::get()) + .into_iter() + .map(|leaf_index| { + let mut proofs = vec![]; + for leaves_count in leaf_index + 1..=num_blocks { + proofs.push( + crate::Pallet::::generate_historical_batch_proof( + vec![leaf_index], + leaves_count, + ) + .unwrap(), + ) + } + proofs + }) + .collect::>(); // then assert_eq!( @@ -258,6 +276,79 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[0][0], + ( + vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], + BatchProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + ) + ); + + // D + // / \ + // / \ + // A B C + // / \ / \ / \ + // 1 2 3 4 5 6 7 + // + // we're proving 3 => we need { 4, A, C++7 } + assert_eq!( + proofs[2], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 7, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), + ], + } + ) + ); + // A + // / \ + // 1 2 3 + // + // we're proving 3 => we need { A } + assert_eq!( + historical_proofs[2][0], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 3, + items: vec![hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" + ),], + } + ) + ); + // D + // / \ + // / \ + // A B + // / \ / \ + // 1 2 3 4 5 + // we're proving 3 => we need { 4, A, 5 } + assert_eq!( + historical_proofs[2][2], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 5, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("3b031d22e24f1126c8f7d2f394b663f9b960ed7abbedb7152e17ce16112656d0") + ], + } + ) + ); + assert_eq!(historical_proofs[2][4], proofs[2]); + assert_eq!( proofs[4], ( @@ -273,6 +364,21 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[4][0], + ( + vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], + BatchProof { + leaf_indices: vec![4], + leaf_count: 5, + items: vec![hex( + "ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252" + ),], + } + ) + ); + assert_eq!(historical_proofs[4][2], proofs[4]); + assert_eq!( proofs[6], ( @@ -287,6 +393,7 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!(historical_proofs[6][0], proofs[6]); }); } @@ -302,9 +409,8 @@ fn should_generate_batch_proof_correctly() { // to retrieve full leaf data. 
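// A minimal sketch (illustration only, not part of the patch, reusing this
// module's `Test` runtime and the offchain setup around here): a historical
// proof is pinned to the MMR size it was generated for, not to the current
// leaf count:
fn pinned_to_historical_size() {
    let (_leaves, proof) =
        crate::Pallet::<Test>::generate_historical_batch_proof(vec![2], 5).unwrap();
    assert_eq!(proof.leaf_count, 5); // fixed at 5 even though 7 leaves exist now
}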
register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for all leaves + // when generate proofs for a batch of leaves let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); - // then assert_eq!( proof, @@ -318,6 +424,28 @@ fn should_generate_batch_proof_correctly() { ], } ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap(); + // then + assert_eq!( + historical_proof, + BatchProof { + leaf_indices: vec![0, 4, 5], + leaf_count: 6, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + ], + } + ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 7).unwrap(); + // then + assert_eq!(historical_proof, proof); }); } @@ -338,11 +466,33 @@ fn should_verify() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); + let (simple_historical_leaves, simple_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); + let (advanced_historical_leaves, advanced_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 7).unwrap() + }); ext.execute_with(|| { add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); + assert_eq!( + crate::Pallet::::verify_leaves( + simple_historical_leaves, + simple_historical_proof5 + ), + Ok(()) + ); + assert_eq!( + crate::Pallet::::verify_leaves( + advanced_historical_leaves, + advanced_historical_proof5 + ), + Ok(()) + ); }); } @@ -350,16 +500,40 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - leaves: &Vec, + leaf_indices: &Vec, blocks_to_add: usize, ) { - let (leaves, proof) = ext - .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); + let (leaves, proof) = ext.execute_with(|| { + crate::Pallet::::generate_batch_proof(leaf_indices.to_vec()).unwrap() + }); + + let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); + let min_mmr_size = leaf_indices.iter().max().unwrap() + 1; + + // generate historical proofs for all possible mmr sizes, + // lower bound being index of highest leaf to be proven + let historical_proofs = (min_mmr_size..=mmr_size) + .map(|mmr_size| { + ext.execute_with(|| { + crate::Pallet::::generate_historical_batch_proof( + leaf_indices.to_vec(), + mmr_size, + ) + .unwrap() + }) + }) + .collect::>(); ext.execute_with(|| { add_blocks(blocks_to_add); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + historical_proofs.iter().for_each(|(leaves, proof)| { + assert_eq!( + crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), + Ok(()) + ); + }); }) } @@ -378,7 +552,7 @@ fn should_verify_batch_proofs() { ext.persist_offchain_overlay(); // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); + let leaves_set: Vec> = (0..=n).into_iter().powerset().skip(1).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -393,7 +567,7 @@ fn should_verify_batch_proofs() { 
ext.persist_offchain_overlay(); // generate all possible 2-leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); + let leaves_set: Vec> = (0..=n).into_iter().combinations(2).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -414,7 +588,13 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -424,12 +604,27 @@ fn verification_should_be_stateless() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (_, historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>(root, vec![leaf], proof5), + crate::verify_leaves_proof::<::Hashing, _>( + root_7, + vec![leaf.clone()], + proof5 + ), + Ok(()) + ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + vec![leaf], + historical_proof5 + ), Ok(()) ); } @@ -441,7 +636,13 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -451,12 +652,15 @@ fn should_verify_batch_proof_statelessly() { // when crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (historical_leaves, historical_proof) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. assert_eq!( crate::verify_leaves_proof::<::Hashing, _>( - root, + root_7, leaves .into_iter() .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) @@ -465,6 +669,17 @@ fn should_verify_batch_proof_statelessly() { ), Ok(()) ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + historical_leaves + .into_iter() + .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) + .collect(), + historical_proof + ), + Ok(()) + ); } #[test] @@ -721,3 +936,36 @@ fn should_verify_canonicalized() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); }); } + +#[test] +fn does_not_panic_when_generating_historical_proofs() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + + // given 7 blocks (7 MMR leaves) + ext.execute_with(|| add_blocks(7)); + ext.persist_offchain_overlay(); + + // Try to generate historical proof with invalid arguments. 
This requires the offchain + // extensions to be present to retrieve full leaf data. + register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when leaf index is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 7), + Err(Error::LeafNotFound), + ); + + // when leaves count is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![3], 100), + Err(Error::InvalidLeavesCount), + ); + + // when both leaf index and leaves count are invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 100), + Err(Error::InvalidLeavesCount), + ); + }); +} diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 29a7e3d1a6fb6..c40a594739ec1 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -402,6 +402,8 @@ pub enum Error { PalletNotIncluded, /// Cannot find the requested leaf index InvalidLeafIndex, + /// The provided leaves count is larger than the actual leaves count. + InvalidLeavesCount, } impl Error { @@ -455,7 +457,14 @@ sp_api::decl_runtime_apis! { fn mmr_root() -> Result; /// Generate MMR proof for a series of leaves under given indices. - fn generate_batch_proof(leaf_indices: Vec) -> Result<(Vec, BatchProof), Error>; + fn generate_batch_proof(leaf_indices: Vec) + -> Result<(Vec, BatchProof), Error>; + + /// Generate MMR proof for a series of leaves under given indices, using MMR at given `leaves_count` size. + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// From 37664fe5b3513eb996225f016eceaf74963b8133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sun, 2 Oct 2022 17:16:45 +0200 Subject: [PATCH 28/75] Remove contracts RPCs (#12358) * Remove contracts RPCs * Remove serde as RPC serialization is no longer needed * Rename folder to match crate name * Compile fix * Remove Byte wrapper --- Cargo.lock | 26 +- Cargo.toml | 4 +- bin/node/rpc/Cargo.toml | 1 - bin/node/rpc/src/lib.rs | 3 - bin/node/runtime/Cargo.toml | 6 +- bin/node/runtime/src/lib.rs | 2 +- frame/contracts/Cargo.toml | 2 +- .../{common => primitives}/Cargo.toml | 8 - .../{common => primitives}/README.md | 0 .../{common => primitives}/src/lib.rs | 91 +-- frame/contracts/rpc/Cargo.toml | 30 - frame/contracts/rpc/README.md | 3 - frame/contracts/rpc/runtime-api/README.md | 7 - frame/contracts/rpc/src/lib.rs | 524 ------------------ .../{rpc => }/runtime-api/Cargo.toml | 12 +- frame/contracts/runtime-api/README.md | 7 + .../{rpc => }/runtime-api/src/lib.rs | 6 +- frame/contracts/src/exec.rs | 27 +- frame/contracts/src/lib.rs | 8 +- frame/contracts/src/tests.rs | 15 +- frame/contracts/src/wasm/mod.rs | 103 ++-- frame/contracts/src/wasm/runtime.rs | 10 +- 22 files changed, 103 insertions(+), 792 deletions(-) rename frame/contracts/{common => primitives}/Cargo.toml (70%) rename frame/contracts/{common => primitives}/README.md (100%) rename frame/contracts/{common => primitives}/src/lib.rs (74%) delete mode 100644 frame/contracts/rpc/Cargo.toml delete mode 100644 frame/contracts/rpc/README.md delete mode 100644 frame/contracts/rpc/runtime-api/README.md delete mode 100644 frame/contracts/rpc/src/lib.rs rename frame/contracts/{rpc => }/runtime-api/Cargo.toml (78%) create mode 100644 frame/contracts/runtime-api/README.md rename frame/contracts/{rpc => }/runtime-api/src/lib.rs (94%) diff 
--git a/Cargo.lock b/Cargo.lock index de50d4ec27105..723a09ee9a39f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3371,7 +3371,7 @@ dependencies = [ "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", + "pallet-contracts-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -4825,7 +4825,6 @@ version = "3.0.0-dev" dependencies = [ "jsonrpsee", "node-primitives", - "pallet-contracts-rpc", "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "sc-chain-spec", @@ -5530,10 +5529,6 @@ version = "6.0.0" dependencies = [ "bitflags", "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-rpc", "sp-runtime", "sp-std", ] @@ -5548,24 +5543,7 @@ dependencies = [ ] [[package]] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -dependencies = [ - "jsonrpsee", - "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", - "parity-scale-codec", - "serde", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-rpc", - "sp-runtime", -] - -[[package]] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" dependencies = [ "pallet-contracts-primitives", diff --git a/Cargo.toml b/Cargo.toml index 25f12a2c9fd3f..02bc6aede8669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,8 @@ members = [ "frame/child-bounties", "frame/collective", "frame/contracts", - "frame/contracts/rpc", - "frame/contracts/rpc/runtime-api", + "frame/contracts/primitives", + "frame/contracts/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/fast-unstake", diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0b69ae27010fa..1f93feabf2f1e 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } -pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 0e6b04087fa63..94e01619c6e63 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -108,7 +108,6 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, @@ -118,7 +117,6 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use pallet_contracts_rpc::{Contracts, ContractsApiServer}; use pallet_mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; @@ -150,7 +148,6 @@ where // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
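// Hedged sketch (illustration only; the client-side plumbing below is an
// assumption — only the runtime-api route is provided by this series): with the
// dedicated RPC gone, a dry-run travels through the generic `state_call` RPC
// into the `ContractsApi` runtime API, roughly:
//
//     let args = (origin, dest, value, gas_limit, storage_deposit_limit, input)
//         .encode(); // SCALE-encode the arguments
//     let raw = client.state_call("ContractsApi_call", args, None).await?;
//     let outcome = ContractExecResult::decode(&mut &raw[..])?;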
- io.merge(Contracts::new(client.clone()).into_rpc())?; io.merge(Mmr::new(client.clone()).into_rpc())?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index e722024231651..ac3afc19da50f 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -61,8 +61,8 @@ pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/child-bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } +pallet-contracts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } @@ -139,7 +139,7 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", - "pallet-contracts-rpc-runtime-api/std", + "pallet-contracts-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4fa4049e22682..f0c68b5b225cd 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1942,7 +1942,7 @@ impl_runtime_apis! 
{ } } - impl pallet_contracts_rpc_runtime_api::ContractsApi< + impl pallet_contracts_runtime_api::ContractsApi< Block, AccountId, Balance, BlockNumber, Hash, > for Runtime diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 30fbad680ebe5..7c3b677e06436 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -36,7 +36,7 @@ rand_pcg = { version = "0.3", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "common" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" } pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/primitives/Cargo.toml similarity index 70% rename from frame/contracts/common/Cargo.toml rename to frame/contracts/primitives/Cargo.toml index 49d7973ab155f..64e332007350b 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -15,23 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1", features = ["derive"], optional = true } # Substrate Dependencies (This crate should not rely on frame) -sp-core = { version = "6.0.0", path = "../../../primitives/core", default-features = false } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc", optional = true } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ "codec/std", - "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", - "sp-rpc", - "serde", ] diff --git a/frame/contracts/common/README.md b/frame/contracts/primitives/README.md similarity index 100% rename from frame/contracts/common/README.md rename to frame/contracts/primitives/README.md diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/primitives/src/lib.rs similarity index 74% rename from frame/contracts/common/src/lib.rs rename to frame/contracts/primitives/src/lib.rs index f810725afcd36..5daf875ac2651 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -21,32 +21,16 @@ use bitflags::bitflags; use codec::{Decode, Encode}; -use sp_core::Bytes; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "std")] -use sp_rpc::number::NumberOrHex; - /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. 
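// Hedged sketch (illustration only, not part of the patch): with the `serde`
// derives removed these types travel SCALE-encoded only, so a client decodes
// the raw bytes directly (the `ContractExecResult<Balance>` alias is assumed to
// live in this crate):
//
//     use codec::Decode;
//     let result = ContractExecResult::<Balance>::decode(&mut &raw_bytes[..])?;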
 #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(
-	feature = "std",
-	serde(
-		rename_all = "camelCase",
-		bound(serialize = "R: Serialize, Balance: Copy + Into<NumberOrHex>"),
-		bound(deserialize = "R: Deserialize<'de>, Balance: TryFrom<NumberOrHex>")
-	)
-)]
 pub struct ContractResult<R, Balance> {
 	/// How much gas was consumed during execution.
 	pub gas_consumed: u64,
@@ -80,7 +64,6 @@ pub struct ContractResult<R, Balance> {
 	///
 	/// The debug message is never generated during on-chain execution. It is reserved for
 	/// RPC calls.
-	#[cfg_attr(feature = "std", serde(with = "as_string"))]
 	pub debug_message: Vec<u8>,
 	/// The execution result of the wasm code.
 	pub result: R,
@@ -113,8 +96,6 @@ pub enum ContractAccessError {
 bitflags! {
 	/// Flags used by a contract to customize exit behaviour.
 	#[derive(Encode, Decode)]
-	#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-	#[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))]
 	pub struct ReturnFlags: u32 {
 		/// If this bit is set all changes made by the contract execution are rolled back.
 		const REVERT = 0x0000_0001;
@@ -123,13 +104,11 @@ bitflags! {

 /// Output of a contract call or instantiation which ran to completion.
 #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
 pub struct ExecReturnValue {
 	/// Flags passed along by `seal_return`. Empty when `seal_return` was never called.
 	pub flags: ReturnFlags,
 	/// Buffer passed along by `seal_return`. Empty when `seal_return` was never called.
-	pub data: Bytes,
+	pub data: Vec<u8>,
 }

 impl ExecReturnValue {
@@ -141,8 +120,6 @@ impl ExecReturnValue {

 /// The result of a successful contract instantiation.
 #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
 pub struct InstantiateReturnValue<AccountId> {
 	/// The output of the called constructor.
 	pub result: ExecReturnValue,
@@ -152,63 +129,40 @@ pub struct InstantiateReturnValue<AccountId> {

 /// The result of successfully uploading a contract.
 #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(
-	feature = "std",
-	serde(
-		rename_all = "camelCase",
-		bound(serialize = "CodeHash: Serialize, Balance: Copy + Into<NumberOrHex>"),
-		bound(deserialize = "CodeHash: Deserialize<'de>, Balance: TryFrom<NumberOrHex>")
-	)
-)]
 pub struct CodeUploadReturnValue<CodeHash, Balance> {
 	/// The key under which the new code is stored.
 	pub code_hash: CodeHash,
 	/// The deposit that was reserved at the caller. Is zero when the code already existed.
-	#[cfg_attr(feature = "std", serde(with = "as_hex"))]
 	pub deposit: Balance,
 }

 /// Reference to an existing code hash or a new wasm module.
 #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))]
 pub enum Code<Hash> {
 	/// A wasm module as raw bytes.
-	Upload(Bytes),
+	Upload(Vec<u8>),
 	/// The code hash of an on-chain wasm blob.
 	Existing(Hash),
 }

 impl<T: Into<Vec<u8>>, Hash> From<T> for Code<Hash> {
 	fn from(from: T) -> Self {
-		Code::Upload(Bytes(from.into()))
+		Code::Upload(from.into())
 	}
 }

 /// The amount of balance that was either charged or refunded in order to pay for storage.
#[derive(Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "Balance: Copy + Into"), - bound(deserialize = "Balance: TryFrom") - ) -)] pub enum StorageDeposit { /// The transaction reduced storage consumption. /// /// This means that the specified amount of balance was transferred from the involved /// contracts to the call origin. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Refund(Balance), /// The transaction increased overall storage usage. /// /// This means that the specified amount of balance was transferred from the call origin /// to the contracts involved. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Charge(Balance), } @@ -295,42 +249,3 @@ where } } } - -#[cfg(feature = "std")] -mod as_string { - use super::*; - use serde::{ser::Error, Deserializer, Serializer}; - - pub fn serialize(bytes: &Vec, serializer: S) -> Result { - std::str::from_utf8(bytes) - .map_err(|e| S::Error::custom(format!("Debug buffer contains invalid UTF8: {}", e)))? - .serialize(serializer) - } - - pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { - Ok(String::deserialize(deserializer)?.into_bytes()) - } -} - -#[cfg(feature = "std")] -mod as_hex { - use super::*; - use serde::{de::Error as _, Deserializer, Serializer}; - - pub fn serialize(balance: &Balance, serializer: S) -> Result - where - S: Serializer, - Balance: Copy + Into, - { - Into::::into(*balance).serialize(serializer) - } - - pub fn deserialize<'de, D, Balance>(deserializer: D) -> Result - where - D: Deserializer<'de>, - Balance: TryFrom, - { - Balance::try_from(NumberOrHex::deserialize(deserializer)?) - .map_err(|_| D::Error::custom("Cannot decode NumberOrHex to Balance")) - } -} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml deleted file mode 100644 index 7876c7cba40d0..0000000000000 --- a/frame/contracts/rpc/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Node-specific RPC methods for interaction with contracts." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } -serde = { version = "1", features = ["derive"] } - -# Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } - -[dev-dependencies] -serde_json = "1" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md deleted file mode 100644 index be6df237bf60d..0000000000000 --- a/frame/contracts/rpc/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Node-specific RPC methods for interaction with contracts. 
- -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md deleted file mode 100644 index d57f29a93bd1d..0000000000000 --- a/frame/contracts/rpc/runtime-api/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Runtime API definition required by Contracts RPC extensions. - -This API should be imported and implemented by the runtime, -of a node that wants to use the custom RPC extension -adding Contracts access methods. - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs deleted file mode 100644 index 1df7a5753f77e..0000000000000 --- a/frame/contracts/rpc/src/lib.rs +++ /dev/null @@ -1,524 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Node-specific RPC methods for interaction with contracts. - -#![warn(unused_crate_dependencies)] - -use std::{marker::PhantomData, sync::Arc}; - -use codec::Codec; -use jsonrpsee::{ - core::{async_trait, Error as JsonRpseeError, RpcResult}, - proc_macros::rpc, - types::error::{CallError, ErrorCode, ErrorObject}, -}; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, -}; -use serde::{Deserialize, Serialize}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_core::Bytes; -use sp_rpc::number::NumberOrHex; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; - -pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; - -const RUNTIME_ERROR: i32 = 1; -const CONTRACT_DOESNT_EXIST: i32 = 2; -const KEY_DECODING_FAILED: i32 = 3; - -pub type Weight = u64; - -/// A rough estimate of how much gas a decent hardware consumes per second, -/// using native execution. -/// This value is used to set the upper bound for maximal contract calls to -/// prevent blocking the RPC for too long. -/// -/// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which -/// determined runtime weights: -/// -const GAS_PER_SECOND: Weight = 1_000_000_000_000; - -/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. -/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. -const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; - -/// A private newtype for converting `ContractAccessError` into an RPC error. 
-struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); - -impl From for JsonRpseeError { - fn from(e: ContractAccessError) -> Self { - use pallet_contracts_primitives::ContractAccessError::*; - match e.0 { - DoesntExist => CallError::Custom(ErrorObject::owned( - CONTRACT_DOESNT_EXIST, - "The specified contract doesn't exist.", - None::<()>, - )) - .into(), - KeyDecodingFailed => CallError::Custom(ErrorObject::owned( - KEY_DECODING_FAILED, - "Failed to decode the specified storage key.", - None::<()>, - )) - .into(), - } - } -} - -/// A struct that encodes RPC parameters required for a call to a smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CallRequest { - origin: AccountId, - dest: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - input_data: Bytes, -} - -/// A struct that encodes RPC parameters required to instantiate a new smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct InstantiateRequest { - origin: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - code: Code, - data: Bytes, - salt: Bytes, -} - -/// A struct that encodes RPC parameters required for a call to upload a new code. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CodeUploadRequest { - origin: AccountId, - code: Bytes, - storage_deposit_limit: Option, -} - -/// Contracts RPC methods. -#[rpc(client, server)] -pub trait ContractsApi -where - Balance: Copy + TryFrom + Into, -{ - /// Executes a call to a contract. - /// - /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. - /// - /// This method is useful for calling getter-like methods on contracts or to dry-run a - /// a contract call in order to determine the `gas_limit`. - #[method(name = "contracts_call")] - fn call( - &self, - call_request: CallRequest, - at: Option, - ) -> RpcResult>; - - /// Instantiate a new contract. - /// - /// This instantiate is performed locally without submitting any transactions. Thus the contract - /// is not actually created. - /// - /// This method is useful for UIs to dry-run contract instantiations. - #[method(name = "contracts_instantiate")] - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option, - ) -> RpcResult>; - - /// Upload new code without instantiating a contract from it. - /// - /// This upload is performed locally without submitting any transactions. Thus executing this - /// won't change any state. - /// - /// This method is useful for UIs to dry-run code upload. - #[method(name = "contracts_upload_code")] - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option, - ) -> RpcResult>; - - /// Returns the value under a specified storage `key` in a contract given by `address` param, - /// or `None` if it is not set. - #[method(name = "contracts_getStorage")] - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option, - ) -> RpcResult>; -} - -/// Contracts RPC methods. -pub struct Contracts { - client: Arc, - _marker: PhantomData, -} - -impl Contracts { - /// Create new `Contracts` with the given reference to the client. 
- pub fn new(client: Arc) -> Self { - Self { client, _marker: Default::default() } - } -} - -#[async_trait] -impl - ContractsApiServer< - ::Hash, - <::Header as HeaderT>::Number, - AccountId, - Balance, - Hash, - > for Contracts -where - Block: BlockT, - Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: ContractsRuntimeApi< - Block, - AccountId, - Balance, - <::Header as HeaderT>::Number, - Hash, - >, - AccountId: Codec, - Balance: Codec + Copy + TryFrom + Into, - Hash: Codec, -{ - fn call( - &self, - call_request: CallRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CallRequest { origin, dest, value, gas_limit, storage_deposit_limit, input_data } = - call_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.call(&at, origin, dest, value, gas_limit, storage_deposit_limit, input_data.to_vec()) - .map_err(runtime_error_into_rpc_err) - } - - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let InstantiateRequest { - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data, - salt, - } = instantiate_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.instantiate( - &at, - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data.to_vec(), - salt.to_vec(), - ) - .map_err(runtime_error_into_rpc_err) - } - - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CodeUploadRequest { origin, code, storage_deposit_limit } = upload_request; - - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - - api.upload_code(&at, origin, code.to_vec(), storage_deposit_limit) - .map_err(runtime_error_into_rpc_err) - } - - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.to_vec()) - .map_err(runtime_error_into_rpc_err)? - .map_err(ContractAccessError)? - .map(Bytes); - - Ok(result) - } -} - -/// Converts a runtime trap into an RPC error. 
-fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { - CallError::Custom(ErrorObject::owned( - RUNTIME_ERROR, - "Runtime error", - Some(format!("{:?}", err)), - )) - .into() -} - -fn decode_hex>(from: H, name: &str) -> RpcResult { - from.try_into().map_err(|_| { - JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!("{:?} does not fit into the {} type", from, name), - None::<()>, - ))) - }) -} - -fn limit_gas(gas_limit: Weight) -> RpcResult<()> { - if gas_limit > GAS_LIMIT { - Err(JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, GAS_LIMIT - ), - None::<()>, - )))) - } else { - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use pallet_contracts_primitives::{ContractExecResult, ContractInstantiateResult}; - use sp_core::U256; - - fn trim(json: &str) -> String { - json.chars().filter(|c| !c.is_whitespace()).collect() - } - - #[test] - fn call_request_should_serialize_deserialize_properly() { - type Req = CallRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", - "value": "0x112210f4B16c1cb1", - "gasLimit": 1000000000000, - "storageDepositLimit": 5000, - "inputData": "0x8c97db39" - } - "#, - ) - .unwrap(); - assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); - } - - #[test] - fn instantiate_request_should_serialize_deserialize_properly() { - type Req = InstantiateRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "value": "0x88", - "gasLimit": 42, - "code": { "existing": "0x1122" }, - "data": "0x4299", - "salt": "0x9988" - } - "#, - ) - .unwrap(); - - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(req.value.into_u256(), 0x88.into()); - assert_eq!(req.gas_limit.into_u256(), 42.into()); - assert_eq!(req.storage_deposit_limit, None); - assert_eq!(&*req.data, [0x42, 0x99].as_ref()); - assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); - let code = match req.code { - Code::Existing(hash) => hash, - _ => panic!("json encoded an existing hash"), - }; - assert_eq!(&code, "0x1122"); - } - - #[test] - fn code_upload_request_should_serialize_deserialize_properly() { - type Req = CodeUploadRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "code": "0x8c97db39", - "storageDepositLimit": 5000 - } - "#, - ) - .unwrap(); - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(&*req.code, [0x8c, 0x97, 0xdb, 0x39].as_ref()); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - } - - #[test] - fn call_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractExecResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"charge": 42000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "flags": 5, - "data": "0x1234" - } - } - 
}"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn instantiate_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractInstantiateResult = - serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "result": { - "flags": 5, - "data": "0x1234" - }, - "accountId": "5CiPP" - } - } - }"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"charge": 0}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn code_upload_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: CodeUploadResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "Ok": { - "codeHash": 4711, - "deposit": 99 - } - }"#, - ); - test( - r#"{ - "Err": "BadOrigin" - }"#, - ); - } -} diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/runtime-api/Cargo.toml similarity index 78% rename from frame/contracts/rpc/runtime-api/Cargo.toml rename to frame/contracts/runtime-api/Cargo.toml index bd07d577ec272..05b0e05d4c568 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/runtime-api/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "Runtime API definition required by Contracts RPC extensions." 
+description = "Runtime API definition used to provide dry-run capabilities" readme = "README.md" [package.metadata.docs.rs] @@ -17,10 +17,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } # Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../common" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../../primitives/std" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../primitives" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } [features] default = ["std"] diff --git a/frame/contracts/runtime-api/README.md b/frame/contracts/runtime-api/README.md new file mode 100644 index 0000000000000..fed285b23b2ac --- /dev/null +++ b/frame/contracts/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition used to provide dry-run capabilities + +This API should be imported and implemented by the runtime, +of a node that wants to provide clients with dry-run +capabilities. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/runtime-api/src/lib.rs similarity index 94% rename from frame/contracts/rpc/runtime-api/src/lib.rs rename to frame/contracts/runtime-api/src/lib.rs index 9765b37057c7b..79fd20c8c0163 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/runtime-api/src/lib.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime API definition required by Contracts RPC extensions. +//! Runtime API definition used to provide dry-run capabilities. //! //! This API should be imported and implemented by the runtime, -//! of a node that wants to use the custom RPC extension -//! adding Contracts access methods. +//! of a node that wants to provide clients with dry-run +//! capabilities. 
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 6260dd41de707..bf35410d0bd4b 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1384,7 +1384,6 @@ mod tests { use frame_system::{EventRecord, Phase}; use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; - use sp_core::Bytes; use sp_runtime::{traits::Hash, DispatchError}; use std::{ cell::RefCell, @@ -1517,7 +1516,7 @@ mod tests { } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } fn exec_trapped() -> ExecResult { @@ -1586,7 +1585,7 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1621,13 +1620,13 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1662,7 +1661,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1715,7 +1714,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1736,7 +1735,7 @@ mod tests { let output = result.unwrap(); assert!(!output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 2, 3, 4]); }); } @@ -1747,7 +1746,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1768,7 +1767,7 @@ mod tests { let output = result.unwrap(); assert!(output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 2, 3, 4]); }); } @@ -2115,7 +2114,7 @@ mod tests { #[test] fn instantiation_work_with_success_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2140,7 +2139,7 @@ mod tests { &[], None, ), - Ok((address, 
ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address + Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); // Check that the newly created account has the expected code hash and @@ -2159,7 +2158,7 @@ mod tests { #[test] fn instantiation_fails_with_failing_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2184,7 +2183,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address + Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); // Check that the account has not been created. diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f9a1c8decf042..3aeb8742705c2 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -123,7 +123,7 @@ use pallet_contracts_primitives::{ StorageDeposit, }; use scale_info::TypeInfo; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; @@ -512,7 +512,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -743,7 +743,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -1234,7 +1234,7 @@ where let try_exec = || { let schedule = T::Schedule::get(); let (extra_deposit, executable) = match code { - Code::Upload(Bytes(binary)) => { + Code::Upload(binary) => { let executable = PrefabWasmModule::from_code(binary, &schedule, origin.clone()) .map_err(|(err, msg)| { debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e5893c3dbd112..b4a8f8f4c834f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -44,7 +44,6 @@ use frame_support::{ }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; -use sp_core::Bytes; use sp_io::hashing::blake2_256; use sp_keystore::{testing::KeyStore, KeystoreExt}; use sp_runtime::{ @@ -1722,7 +1721,7 @@ fn chain_extension_works() { let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); - assert_eq!(result.result.unwrap().data, Bytes(input)); + assert_eq!(result.result.unwrap().data, input); // 1 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( @@ -1787,7 +1786,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![42, 99])); + assert_eq!(result.data, vec![42, 99]); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second @@ -1804,7 +1803,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); + assert_eq!(result.data, vec![0x4B, 0x1D]); // Diverging to third chain extension that is 
disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension @@ -2672,7 +2671,7 @@ fn ecdsa_recover() { .result .unwrap(); assert!(!result.did_revert()); - assert_eq!(result.data.as_ref(), &EXPECTED_COMPRESSED_PUBLIC_KEY); + assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); }) } @@ -3503,7 +3502,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.result.flags, flags); - assert_eq!(result.result.data.0, buffer); + assert_eq!(result.result.data, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the contract for later use. @@ -3539,7 +3538,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.flags, flags); - assert_eq!(result.data.0, buffer); + assert_eq!(result.data, buffer); }); } @@ -3559,7 +3558,7 @@ fn code_rejected_error_works() { 0, GAS_LIMIT, None, - Code::Upload(Bytes(wasm)), + Code::Upload(wasm), vec![], vec![], true, diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index d8b4cd245356e..b341ae3bd155d 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -281,7 +281,7 @@ mod tests { }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; - use sp_core::{Bytes, H256}; + use sp_core::H256; use sp_runtime::DispatchError; use std::{ borrow::BorrowMut, @@ -341,8 +341,8 @@ mod tests { } /// The call is mocked and just returns this hardcoded value. - fn call_return_data() -> Bytes { - Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) + fn call_return_data() -> Vec { + vec![0xDE, 0xAD, 0xBE, 0xEF] } impl Default for MockExt { @@ -404,7 +404,7 @@ mod tests { }); Ok(( Contracts::::contract_address(&ALICE, &code_hash, salt), - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, + ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, )) } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { @@ -804,7 +804,7 @@ mod tests { let mut mock_ext = MockExt::default(); let input = vec![0xff, 0x2a, 0x99, 0x88]; let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); - assert_eq!(result.data.0, input); + assert_eq!(result.data, input); assert_eq!( &mock_ext.calls, &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] @@ -907,15 +907,15 @@ mod tests { // value does not exist -> sentinel value returned let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value did exist -> success let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1,); // value did exist -> success (zero sized type) let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0,); } #[test] @@ -977,13 +977,13 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value exists let input = (64, [1u8; 
64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); @@ -991,7 +991,7 @@ mod tests { let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[2u8; 19].to_vec()).unwrap(), &([] as [u8; 0])); } @@ -1234,7 +1234,7 @@ mod tests { let output = execute(CODE_ECDSA_TO_ETH_ADDRESS, vec![], MockExt::default()).unwrap(); assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x02; 20].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: [0x02; 20].to_vec() } ); } @@ -1311,7 +1311,7 @@ mod tests { assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() } ); } @@ -1630,10 +1630,7 @@ mod tests { fn return_from_start_fn() { let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1902,15 +1899,13 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes( - ( - array_bytes::hex2array_unchecked::<32>( - "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" - ), - 42u64, - ) - .encode() - ), + data: ( + array_bytes::hex2array_unchecked::<32>( + "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" + ), + 42u64, + ) + .encode() }, ); } @@ -2124,7 +2119,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes(array_bytes::hex2bytes_unchecked("445566778899")), + data: array_bytes::hex2bytes_unchecked("445566778899"), } ); assert!(!output.did_revert()); @@ -2143,7 +2138,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::REVERT, - data: Bytes(array_bytes::hex2bytes_unchecked("5566778899")), + data: array_bytes::hex2bytes_unchecked("5566778899"), } ); assert!(output.did_revert()); @@ -2306,7 +2301,7 @@ mod tests { let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); assert_eq!(*ext.runtime_calls.borrow(), vec![call]); // 0 = ReturnCode::Success - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); } #[test] @@ -2371,19 +2366,19 @@ mod tests { // value did not exist before -> sentinel returned let input = ([1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = ([1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = ([1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2442,19 +2437,19 @@ mod tests { // value did not exist before -> sentinel returned let input = (32, [1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = (32, [1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = (32, [1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2527,7 +2522,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2535,21 +2530,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value exists (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); - assert_eq!(&result.data.0[4..], &([] as [u8; 0])); + assert_eq!(&result.data[4..], &([] as [u8; 0])); } #[test] @@ -2611,14 +2606,14 @@ mod tests { let input = (32, [3u8; 32]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[3u8; 32].to_vec()), None); // value did exist let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned - 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // value cleared assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); @@ -2626,14 +2621,14 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); // value exists let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned (test for 0 sized) - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // value cleared assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); } @@ -2710,7 +2705,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2718,21 +2713,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value did exist -> length returned (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); - assert_eq!(&result.data.0[4..], &[0u8; 0]); + assert_eq!(&result.data[4..], &[0u8; 0]); } #[test] @@ -2769,10 +2764,7 @@ mod tests { let output = execute(CODE_IS_CONTRACT, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 1u32 (`true`). 
- assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(1u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 1u32.encode() },); } #[test] @@ -2906,10 +2898,7 @@ mod tests { let output = execute(CODE_CALLER_IS_ORIGIN, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 0u32 (`false`) - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(0u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 0u32.encode() },); } #[test] diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index edd413aa45bf0..3296492994071 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -30,7 +30,7 @@ use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; use sp_sandbox::SandboxMemory; @@ -483,10 +483,10 @@ where TrapReason::Return(ReturnData { flags, data }) => { let flags = ReturnFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?; - Ok(ExecReturnValue { flags, data: Bytes(data) }) + Ok(ExecReturnValue { flags, data }) }, TrapReason::Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), TrapReason::SupervisorError(error) => return Err(error.into()), } } @@ -494,7 +494,7 @@ where // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. 
@@ -879,7 +879,7 @@ where
 			if let Ok(return_value) = call_outcome {
 				return Err(TrapReason::Return(ReturnData {
 					flags: return_value.flags.bits(),
-					data: return_value.data.0,
+					data: return_value.data,
 				}))
 			}
 		}

From 9472af8e2af41b47a471c084035bf8aecf61d8da Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Mon, 3 Oct 2022 16:00:57 +0300
Subject: [PATCH 29/75] Beefy on-demand justifications as a custom RequestResponse protocol (#12124)

* client/beefy: create communication module and move gossip there

* client/beefy: move beefy_protocol_name module to communication

* client/beefy: move notification module under communication

* client/beefy: add incoming request_response protocol handler

* client/beefy: keep track of connected peers and their progress

* client/beefy: add logic for generating Justif requests

* client/beefy: cancel outdated on-demand justification requests

* try Andre's suggestion for JustificationEngine

* justif engine add justifs validation

* client/beefy: impl OnDemandJustificationsEngine async next()

* move beefy proto name test

* client/beefy: initialize OnDemandJustificationsEngine

* client/tests: allow for custom req-resp protocols

* client/beefy: on-demand-justif: implement simple peer selection strategy

* client/beefy: fix voter initialization

Fix a corner case where the voter gets a single burst of finality
notifications just as it starts. The notification stream was consumed by
the "wait_for_pallet" logic, and the main loop would only subscribe to
finality notifications afterwards, by which time some notifications
might have been lost. Fix this by subscribing the main loop to
notifications before waiting for the pallet to become available. Share
the same stream with the main loop so that notifications for blocks
before the pallet is available are ignored, while _all_ notifications
after the pallet is available are processed. Add a regression test for
this.
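A minimal, self-contained sketch of this subscribe-before-wait pattern, with a plain channel standing in for the client's finality-notification stream and `pallet_available_at` as a stand-in predicate:

    use futures::{channel::mpsc, StreamExt};

    async fn run_voter(mut finality: mpsc::UnboundedReceiver<u64>) {
        // The subscription exists before we start waiting, so nothing sent
        // from this point on can be lost. Consume (and ignore) notifications
        // until the pallet becomes available.
        while let Some(block) = finality.next().await {
            if pallet_available_at(block) {
                break
            }
        }
        // The main loop keeps reading from the *same* stream, so every
        // notification after pallet availability is processed.
        while let Some(block) = finality.next().await {
            process(block);
        }
    }

    fn pallet_available_at(block: u64) -> bool { block >= 5 } // stand-in predicate
    fn process(block: u64) { println!("vote on block {}", block); }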
Signed-off-by: acatangiu * client/beefy: make sure justif requests are always out for mandatory blocks * client/beefy: add test for on-demand justifications sync * client/beefy: tweak main loop event processing order * client/beefy: run on-demand-justif-handler under same async task as voter * client/beefy: add test for known-peers * client/beefy: reorg request-response module * client/beefy: add issue references for future work todos * client/beefy: consolidate on-demand-justifications engine state machine Signed-off-by: acatangiu * client/beefy: fix for polkadot companion * client/beefy: implement review suggestions * cargo fmt and clippy * fix merge damage * fix rust-doc * fix merge damage * fix merge damage * client/beefy: add test for justif proto name Signed-off-by: acatangiu --- client/beefy/rpc/src/lib.rs | 6 +- .../beefy/src/{ => communication}/gossip.rs | 20 +- client/beefy/src/communication/mod.rs | 118 +++++++ .../src/{ => communication}/notification.rs | 0 client/beefy/src/communication/peers.rs | 131 ++++++++ .../incoming_requests_handler.rs | 193 ++++++++++++ .../src/communication/request_response/mod.rs | 101 ++++++ .../outgoing_requests_engine.rs | 245 +++++++++++++++ client/beefy/src/import.rs | 2 +- client/beefy/src/lib.rs | 131 ++++---- client/beefy/src/round.rs | 20 +- client/beefy/src/tests.rs | 287 ++++++++++++------ client/beefy/src/worker.rs | 166 +++++++--- client/network/test/src/lib.rs | 7 +- 14 files changed, 1208 insertions(+), 219 deletions(-) rename client/beefy/src/{ => communication}/gossip.rs (94%) create mode 100644 client/beefy/src/communication/mod.rs rename client/beefy/src/{ => communication}/notification.rs (100%) create mode 100644 client/beefy/src/communication/peers.rs create mode 100644 client/beefy/src/communication/request_response/incoming_requests_handler.rs create mode 100644 client/beefy/src/communication/request_response/mod.rs create mode 100644 client/beefy/src/communication/request_response/outgoing_requests_engine.rs diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 3be182ceb8f39..0af474116e6d0 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,7 +35,9 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; +use beefy_gadget::communication::notification::{ + BeefyBestBlockStream, BeefyVersionedFinalityProofStream, +}; mod notification; @@ -165,8 +167,8 @@ mod tests { use super::*; use beefy_gadget::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/communication/gossip.rs similarity index 94% rename from client/beefy/src/gossip.rs rename to client/beefy/src/communication/gossip.rs index 02d5efe9e0e58..6c41a2e48932a 100644 --- a/client/beefy/src/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{collections::BTreeMap, time::Duration}; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; use sc_network::PeerId; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; @@ -28,13 +28,12 @@ use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; use wasm_timer::Instant; +use crate::{communication::peers::KnownPeers, keystore::BeefyKeystore}; use beefy_primitives::{ crypto::{Public, Signature}, VoteMessage, }; -use crate::keystore::BeefyKeystore; - // Timeout for rebroadcasting messages. const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -103,17 +102,19 @@ where topic: B::Hash, known_votes: RwLock>, next_rebroadcast: Mutex, + known_peers: Arc>>, } impl GossipValidator where B: Block, { - pub fn new() -> GossipValidator { + pub fn new(known_peers: Arc>>) -> GossipValidator { GossipValidator { topic: topic::(), known_votes: RwLock::new(KnownVotes::new()), next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), + known_peers, } } @@ -165,6 +166,7 @@ where if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); + self.known_peers.lock().note_vote_for(*sender, round); return ValidationResult::ProcessAndKeep(self.topic) } else { // TODO: report peer @@ -271,7 +273,7 @@ mod tests { #[test] fn note_and_drop_round_works() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(1u64); @@ -298,7 +300,7 @@ mod tests { #[test] fn note_same_round_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(3u64); gv.note_round(7u64); @@ -355,7 +357,7 @@ mod tests { #[test] fn should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let mut context = TestContext; @@ -391,7 +393,7 @@ mod tests { #[test] fn messages_allowed_and_expired() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); let intent = MessageIntent::Broadcast; @@ -434,7 +436,7 @@ mod tests { #[test] fn messages_rebroadcast() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs new file mode 100644 index 0000000000000..93646677c0ecd --- /dev/null +++ b/client/beefy/src/communication/mod.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Communication streams for the BEEFY networking protocols. + +pub mod notification; +pub mod request_response; + +pub(crate) mod gossip; +pub(crate) mod peers; + +pub(crate) mod beefy_protocol_name { + use array_bytes::bytes2hex; + use sc_network::ProtocolName; + + /// BEEFY votes gossip protocol name suffix. + const GOSSIP_NAME: &str = "/beefy/1"; + /// BEEFY justifications protocol name suffix. + const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; + + /// Old names for the gossip protocol, used for backward compatibility. + pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; + + /// Name of the votes gossip protocol used by BEEFY. + /// + /// Must be registered towards the networking in order for BEEFY voter to properly function. + pub fn gossip_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, GOSSIP_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), GOSSIP_NAME).into() + } + } + + /// Name of the BEEFY justifications request-response protocol. + pub fn justifications_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, JUSTIFICATIONS_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), JUSTIFICATIONS_NAME).into() + } + } +} + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +/// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. +pub fn beefy_peers_set_config( + gossip_protocol_name: sc_network::ProtocolName, +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = + sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); + + cfg.allow_non_reserved(25, 25); + cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); + cfg +} + +#[cfg(test)] +mod tests { + use super::*; + + use sp_core::H256; + + #[test] + fn beefy_protocols_names() { + use beefy_protocol_name::{gossip_protocol_name, justifications_protocol_name}; + // Create protocol name using random genesis hash. + let genesis_hash = H256::random(); + let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + + // Create protocol name using hardcoded genesis hash. Verify exact representation. 
+ let genesis_hash = [ + 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, + 94, 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, + ]; + let genesis_hex = "32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93"; + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + } +} diff --git a/client/beefy/src/notification.rs b/client/beefy/src/communication/notification.rs similarity index 100% rename from client/beefy/src/notification.rs rename to client/beefy/src/communication/notification.rs diff --git a/client/beefy/src/communication/peers.rs b/client/beefy/src/communication/peers.rs new file mode 100644 index 0000000000000..0e20a0f4e0ff6 --- /dev/null +++ b/client/beefy/src/communication/peers.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Logic for keeping track of BEEFY peers. + +// TODO (issue #12296): replace this naive peer tracking with generic one that infers data +// from multiple network protocols. + +use sc_network::PeerId; +use sp_runtime::traits::{Block, NumberFor, Zero}; +use std::collections::{HashMap, VecDeque}; + +struct PeerData { + last_voted_on: NumberFor, +} + +impl Default for PeerData { + fn default() -> Self { + PeerData { last_voted_on: Zero::zero() } + } +} + +/// Keep a simple map of connected peers +/// and the most recent voting round they participated in. +pub struct KnownPeers { + live: HashMap>, +} + +impl KnownPeers { + pub fn new() -> Self { + Self { live: HashMap::new() } + } + + /// Add new connected `peer`. + pub fn add_new(&mut self, peer: PeerId) { + self.live.entry(peer).or_default(); + } + + /// Note vote round number for `peer`. + pub fn note_vote_for(&mut self, peer: PeerId, round: NumberFor) { + let data = self.live.entry(peer).or_default(); + data.last_voted_on = round.max(data.last_voted_on); + } + + /// Remove connected `peer`. + pub fn remove(&mut self, peer: &PeerId) { + self.live.remove(peer); + } + + /// Return _filtered and cloned_ list of peers that have voted on `block` or higher. + pub fn at_least_at_block(&self, block: NumberFor) -> VecDeque { + self.live + .iter() + .filter_map(|(k, v)| (v.last_voted_on >= block).then_some(k)) + .cloned() + .collect() + } + + /// Answer whether `peer` is part of `KnownPeers` set. 
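The naming scheme exercised by the protocol-name test above is simple enough to reproduce in isolation. A minimal standalone sketch, using plain `String`s in place of `sc_network::ProtocolName` and a hand-rolled hex encoding; the `wococo` fork id is only an illustrative value:

fn gossip_name(genesis_hash: &[u8], fork_id: Option<&str>) -> String {
    // Hex-encode the genesis hash, two lowercase digits per byte.
    let hex: String = genesis_hash.iter().map(|b| format!("{:02x}", b)).collect();
    match fork_id {
        // With a fork id: "/<hex-genesis>/<fork-id>/beefy/1".
        Some(fork) => format!("/{}/{}/beefy/1", hex, fork),
        // Without: "/<hex-genesis>/beefy/1".
        None => format!("/{}/beefy/1", hex),
    }
}

fn main() {
    // First four bytes of the hardcoded genesis hash used in the test above.
    let genesis = [0x32, 0x04, 0x3c, 0x7b];
    assert_eq!(gossip_name(&genesis, None), "/32043c7b/beefy/1");
    assert_eq!(gossip_name(&genesis, Some("wococo")), "/32043c7b/wococo/beefy/1");
}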
+ pub fn contains(&self, peer: &PeerId) -> bool { + self.live.contains_key(peer) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_track_known_peers_progress() { + let (alice, bob, charlie) = (PeerId::random(), PeerId::random(), PeerId::random()); + let mut peers = KnownPeers::::new(); + assert!(peers.live.is_empty()); + + // Alice and Bob new connected peers. + peers.add_new(alice); + peers.add_new(bob); + // 'Tracked' Bob seen voting for 5. + peers.note_vote_for(bob, 5); + // Previously unseen Charlie now seen voting for 10. + peers.note_vote_for(charlie, 10); + + assert_eq!(peers.live.len(), 3); + assert!(peers.contains(&alice)); + assert!(peers.contains(&bob)); + assert!(peers.contains(&charlie)); + + // Get peers at block >= 5 + let at_5 = peers.at_least_at_block(5); + // Should be Bob and Charlie + assert_eq!(at_5.len(), 2); + assert!(at_5.contains(&bob)); + assert!(at_5.contains(&charlie)); + + // 'Tracked' Alice seen voting for 10. + peers.note_vote_for(alice, 10); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Should be Charlie and Alice + assert_eq!(at_9.len(), 2); + assert!(at_9.contains(&charlie)); + assert!(at_9.contains(&alice)); + + // Remove Alice + peers.remove(&alice); + assert_eq!(peers.live.len(), 2); + assert!(!peers.contains(&alice)); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Now should be just Charlie + assert_eq!(at_9.len(), 1); + assert!(at_9.contains(&charlie)); + } +} diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs new file mode 100644 index 0000000000000..c0910a60fba3b --- /dev/null +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -0,0 +1,193 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) BEEFY justifications requests from a remote peer. + +use beefy_primitives::BEEFY_ENGINE_ID; +use codec::Decode; +use futures::{ + channel::{mpsc, oneshot}, + StreamExt, +}; +use log::{debug, trace}; +use sc_client_api::BlockBackend; +use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId, ReputationChange}; +use sc_network_common::protocol::ProtocolName; +use sp_runtime::{generic::BlockId, traits::Block}; +use std::{marker::PhantomData, sync::Arc}; + +use crate::communication::request_response::{ + on_demand_justifications_protocol_config, Error, JustificationRequest, +}; + +/// A request coming in, including a sender for sending responses. +#[derive(Debug)] +pub(crate) struct IncomingRequest { + /// `PeerId` of sending peer. + pub peer: PeerId, + /// The sent request. + pub payload: JustificationRequest, + /// Sender for sending response back. 
+ pub pending_response: oneshot::Sender, +} + +impl IncomingRequest { + /// Create new `IncomingRequest`. + pub fn new( + peer: PeerId, + payload: JustificationRequest, + pending_response: oneshot::Sender, + ) -> Self { + Self { peer, payload, pending_response } + } + + /// Try building from raw network request. + /// + /// This function will fail if the request cannot be decoded and will apply passed in + /// reputation changes in that case. + /// + /// Params: + /// - The raw request to decode + /// - Reputation changes to apply for the peer in case decoding fails. + pub fn try_from_raw( + raw: netconfig::IncomingRequest, + reputation_changes: Vec, + ) -> Result { + let netconfig::IncomingRequest { payload, peer, pending_response } = raw; + let payload = match JustificationRequest::decode(&mut payload.as_ref()) { + Ok(payload) => payload, + Err(err) => { + let response = netconfig::OutgoingResponse { + result: Err(()), + reputation_changes, + sent_feedback: None, + }; + if let Err(_) = pending_response.send(response) { + return Err(Error::DecodingErrorNoReputationChange(peer, err)) + } + return Err(Error::DecodingError(peer, err)) + }, + }; + Ok(Self::new(peer, payload, pending_response)) + } +} + +/// Receiver for incoming BEEFY justifications requests. +/// +/// Takes care of decoding and handling of invalid encoded requests. +pub(crate) struct IncomingRequestReceiver { + raw: mpsc::Receiver, +} + +impl IncomingRequestReceiver { + pub fn new(inner: mpsc::Receiver) -> Self { + Self { raw: inner } + } + + /// Try to receive the next incoming request. + /// + /// Any received request will be decoded, on decoding errors the provided reputation changes + /// will be applied and an error will be reported. + pub async fn recv(&mut self, reputation_changes: F) -> Result, Error> + where + B: Block, + F: FnOnce() -> Vec, + { + let req = match self.raw.next().await { + None => return Err(Error::RequestChannelExhausted), + Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes())?, + }; + Ok(req) + } +} + +/// Handler for incoming BEEFY justifications requests from a remote peer. +pub struct BeefyJustifsRequestHandler { + pub(crate) request_receiver: IncomingRequestReceiver, + pub(crate) justif_protocol_name: ProtocolName, + pub(crate) client: Arc, + pub(crate) _block: PhantomData, +} + +impl BeefyJustifsRequestHandler +where + B: Block, + Client: BlockBackend + Send + Sync, +{ + /// Create a new [`BeefyJustifsRequestHandler`]. + pub fn new>( + genesis_hash: Hash, + fork_id: Option<&str>, + client: Arc, + ) -> (Self, RequestResponseConfig) { + let (request_receiver, config) = + on_demand_justifications_protocol_config(genesis_hash, fork_id); + let justif_protocol_name = config.name.clone(); + + (Self { request_receiver, justif_protocol_name, client, _block: PhantomData }, config) + } + + /// Network request-response protocol name used by this handler. + pub fn protocol_name(&self) -> ProtocolName { + self.justif_protocol_name.clone() + } + + // Sends back justification response if justification found in client backend. + fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { + // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. + + let maybe_encoded_proof = self + .client + .justifications(&BlockId::Number(request.payload.begin)) + .map_err(Error::Client)? + .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) + // No BEEFY justification present. 
+ .ok_or(()); + + request + .pending_response + .send(netconfig::OutgoingResponse { + result: maybe_encoded_proof, + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .map_err(|_| Error::SendResponse) + } + + /// Run [`BeefyJustifsRequestHandler`]. + pub async fn run(mut self) { + trace!(target: "beefy::sync", "🥩 Running BeefyJustifsRequestHandler"); + + while let Ok(request) = self.request_receiver.recv(|| vec![]).await { + let peer = request.peer; + match self.handle_request(request) { + Ok(()) => { + debug!( + target: "beefy::sync", + "🥩 Handled BEEFY justification request from {:?}.", peer + ) + }, + Err(e) => { + // TODO (issue #12293): apply reputation changes here based on error type. + debug!( + target: "beefy::sync", + "🥩 Failed to handle BEEFY justification request from {:?}: {}", peer, e, + ) + }, + } + } + } +} diff --git a/client/beefy/src/communication/request_response/mod.rs b/client/beefy/src/communication/request_response/mod.rs new file mode 100644 index 0000000000000..c83bb9d57e91b --- /dev/null +++ b/client/beefy/src/communication/request_response/mod.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Request/response protocol for syncing BEEFY justifications. + +mod incoming_requests_handler; +pub(crate) mod outgoing_requests_engine; + +pub use incoming_requests_handler::BeefyJustifsRequestHandler; + +use futures::channel::mpsc; +use std::time::Duration; + +use codec::{Decode, Encode, Error as CodecError}; +use sc_network::{config::RequestResponseConfig, PeerId}; +use sp_runtime::traits::{Block, NumberFor}; + +use crate::communication::beefy_protocol_name::justifications_protocol_name; +use incoming_requests_handler::IncomingRequestReceiver; + +// 10 seems reasonable, considering justifs are explicitly requested only +// for mandatory blocks, by nodes that are syncing/catching-up. +const JUSTIF_CHANNEL_SIZE: usize = 10; + +const MAX_RESPONSE_SIZE: u64 = 1024 * 1024; +const JUSTIF_REQUEST_TIMEOUT: Duration = Duration::from_secs(3); + +/// Get the configuration for the BEEFY justifications Request/response protocol. +/// +/// Returns a receiver for messages received on this protocol and the requested +/// `ProtocolConfig`. +/// +/// Consider using [`BeefyJustifsRequestHandler`] instead of this low-level function. 
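The lookup in `handle_request` above reduces to picking the BEEFY entry out of a block's stored `(engine-id, bytes)` justification pairs. A self-contained model of that filtering, with engine ids as bare 4-byte arrays; `*b"BEEF"` matches `beefy_primitives::BEEFY_ENGINE_ID`, and `*b"FRNK"` below stands in for a GRANDPA entry:

const BEEFY_ENGINE_ID: [u8; 4] = *b"BEEF";

// Return the BEEFY proof bytes if the block has one stored, mirroring the
// `justifications(..) ... get(BEEFY_ENGINE_ID)` chain in `handle_request`.
fn beefy_proof(justifs: &[([u8; 4], Vec<u8>)]) -> Result<Vec<u8>, ()> {
    justifs
        .iter()
        .find(|(id, _)| *id == BEEFY_ENGINE_ID)
        .map(|(_, proof)| proof.clone())
        // No BEEFY justification present -> Err(()), i.e. an empty failure response.
        .ok_or(())
}

fn main() {
    let stored = vec![(*b"FRNK", vec![1]), (*b"BEEF", vec![2, 3])];
    assert_eq!(beefy_proof(&stored), Ok(vec![2, 3]));
    assert_eq!(beefy_proof(&[]), Err(()));
}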
+pub(crate) fn on_demand_justifications_protocol_config>( + genesis_hash: Hash, + fork_id: Option<&str>, +) -> (IncomingRequestReceiver, RequestResponseConfig) { + let name = justifications_protocol_name(genesis_hash, fork_id); + let fallback_names = vec![]; + let (tx, rx) = mpsc::channel(JUSTIF_CHANNEL_SIZE); + let rx = IncomingRequestReceiver::new(rx); + let cfg = RequestResponseConfig { + name, + fallback_names, + max_request_size: 32, + max_response_size: MAX_RESPONSE_SIZE, + // We are connected to all validators: + request_timeout: JUSTIF_REQUEST_TIMEOUT, + inbound_queue: Some(tx), + }; + (rx, cfg) +} + +/// BEEFY justification request. +#[derive(Debug, Clone, Encode, Decode)] +pub struct JustificationRequest { + /// Start collecting proofs from this block. + pub begin: NumberFor, +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + RuntimeApi(#[from] sp_api::ApiError), + + /// Decoding failed, we were able to change the peer's reputation accordingly. + #[error("Decoding request failed for peer {0}.")] + DecodingError(PeerId, #[source] CodecError), + + /// Decoding failed, but sending reputation change failed. + #[error("Decoding request failed for peer {0}, and changing reputation failed.")] + DecodingErrorNoReputationChange(PeerId, #[source] CodecError), + + /// Incoming request stream exhausted. Should only happen on shutdown. + #[error("Incoming request channel got closed.")] + RequestChannelExhausted, + + #[error("Failed to send response.")] + SendResponse, + + #[error("Received invalid response.")] + InvalidResponse, +} diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs new file mode 100644 index 0000000000000..e22958e19cd2e --- /dev/null +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Generating request logic for request/response protocol for syncing BEEFY justifications. 
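For reference, the `JustificationRequest` wire format defined above is plain SCALE. A round-trip sketch, assuming `parity-scale-codec` with its `derive` feature (imported as `codec`, as elsewhere in this crate) and `NumberFor<B>` narrowed to `u32`:

use codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct JustificationRequest {
    begin: u32, // `NumberFor<B>` narrowed to `u32` for the sketch
}

fn main() {
    let req = JustificationRequest { begin: 42 };
    // A `u32` is SCALE-encoded as 4 little-endian bytes.
    let wire = req.encode();
    assert_eq!(wire, vec![42, 0, 0, 0]);
    // Decoding consumes the byte slice and restores the request.
    assert_eq!(JustificationRequest::decode(&mut &wire[..]).unwrap(), req);
}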
+ +use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; +use codec::Encode; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + stream::{self, StreamExt}, +}; +use log::{debug, error, warn}; +use parking_lot::Mutex; +use sc_network::{PeerId, ProtocolName}; +use sc_network_common::{ + request_responses::{IfDisconnected, RequestFailure}, + service::NetworkRequest, +}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::{ + generic::BlockId, + traits::{Block, NumberFor}, +}; +use std::{collections::VecDeque, result::Result, sync::Arc}; + +use crate::{ + communication::request_response::{Error, JustificationRequest}, + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + KnownPeers, +}; + +/// Response type received from network. +type Response = Result, RequestFailure>; +/// Used to receive a response from the network. +type ResponseReceiver = oneshot::Receiver; + +enum State { + Idle(stream::Pending>), + AwaitingResponse(PeerId, NumberFor, stream::Once), +} + +pub struct OnDemandJustificationsEngine { + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + + live_peers: Arc>>, + peers_cache: VecDeque, + + state: State, +} + +impl OnDemandJustificationsEngine +where + B: Block, + R: ProvideRuntimeApi, + R::Api: BeefyApi, +{ + pub fn new( + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + live_peers: Arc>>, + ) -> Self { + Self { + network, + runtime, + protocol_name, + live_peers, + peers_cache: VecDeque::new(), + state: State::Idle(stream::pending()), + } + } + + fn reset_peers_cache_for_block(&mut self, block: NumberFor) { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. + self.peers_cache = self.live_peers.lock().at_least_at_block(block); + } + + fn try_next_peer(&mut self) -> Option { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. + let live = self.live_peers.lock(); + while let Some(peer) = self.peers_cache.pop_front() { + if live.contains(&peer) { + return Some(peer) + } + } + None + } + + fn request_from_peer(&mut self, peer: PeerId, block: NumberFor) { + debug!(target: "beefy::sync", "🥩 requesting justif #{:?} from peer {:?}", block, peer); + + let payload = JustificationRequest:: { begin: block }.encode(); + + let (tx, rx) = oneshot::channel(); + + self.network.start_request( + peer, + self.protocol_name.clone(), + payload, + tx, + IfDisconnected::ImmediateError, + ); + + self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + } + + /// If no other request is in progress, start new justification request for `block`. + pub fn request(&mut self, block: NumberFor) { + // ignore new requests while there's already one pending + match &self.state { + State::AwaitingResponse(_, _, _) => return, + State::Idle(_) => (), + } + self.reset_peers_cache_for_block(block); + + // Start the requests engine - each unsuccessful received response will automatically + // trigger a new request to the next peer in the `peers_cache` until there are none left. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + debug!(target: "beefy::sync", "🥩 no good peers to request justif #{:?} from", block); + } + } + + /// Cancel any pending request for block numbers smaller or equal to `block`. 
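The `State` juggling above leans on two `futures` building blocks: `stream::pending()` never yields, parking that `select!` branch while the engine is idle, while `stream::once(rx)` yields the single response and then ends. A self-contained illustration of the awaiting-response half, assuming only the `futures` crate:

use futures::{channel::oneshot, executor::block_on, stream, StreamExt};

fn main() {
    block_on(async {
        // `AwaitingResponse`: a oneshot receiver wrapped in `stream::once`.
        let (tx, rx) = oneshot::channel::<u32>();
        let mut awaiting = stream::once(rx);
        tx.send(7).unwrap();
        // The single response arrives as `Result<_, Canceled>`.
        assert_eq!(awaiting.next().await, Some(Ok(7)));
        // After that one item the stream is exhausted, which is the point at
        // which the engine flips back to `State::Idle(stream::pending())`.
        assert_eq!(awaiting.next().await, None);
    });
}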
+ pub fn cancel_requests_older_than(&mut self, block: NumberFor) { + match &self.state { + State::AwaitingResponse(_, number, _) if *number <= block => { + debug!( + target: "beefy::sync", + "🥩 cancel pending request for justification #{:?}", + number + ); + self.state = State::Idle(stream::pending()); + }, + _ => (), + } + } + + fn process_response( + &mut self, + peer: PeerId, + block: NumberFor, + validator_set: &ValidatorSet, + response: Result, + ) -> Result, Error> { + response + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} hung up: {:?}", + block, peer, e + ); + Error::InvalidResponse + })? + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} error: {:?}", + block, peer, e + ); + Error::InvalidResponse + }) + .and_then(|encoded| { + decode_and_verify_finality_proof::(&encoded[..], block, &validator_set).map_err( + |e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} responded with invalid proof: {:?}", + block, peer, e + ); + Error::InvalidResponse + }, + ) + }) + } + + pub async fn next(&mut self) -> Option> { + let (peer, block, resp) = match &mut self.state { + State::Idle(pending) => { + let _ = pending.next().await; + // This never happens since 'stream::pending' never generates any items. + return None + }, + State::AwaitingResponse(peer, block, receiver) => { + let resp = receiver.next().await?; + (*peer, *block, resp) + }, + }; + // We received the awaited response. Our 'stream::once()' receiver will never generate any + // other response, meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle(stream::pending()); + + let block_id = BlockId::number(block); + let validator_set = self + .runtime + .runtime_api() + .validator_set(&block_id) + .map_err(|e| { + error!(target: "beefy::sync", "🥩 Runtime API error {:?} in on-demand justif engine.", e); + e + }) + .ok()? + .or_else(|| { + error!(target: "beefy::sync", "🥩 BEEFY pallet not available for block {:?}.", block); + None + })?; + + self.process_response(peer, block, &validator_set, resp) + .map_err(|_| { + // No valid justification received, try next peer in our set. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + warn!(target: "beefy::sync", "🥩 ran out of peers to request justif #{:?} from", block); + } + }) + .map(|proof| { + debug!( + target: "beefy::sync", + "🥩 received valid on-demand justif #{:?} from {:?}", + block, peer + ); + proof + }) + .ok() + } +} diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index db4d8bfba7450..89a4517334189 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -33,8 +33,8 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, - notification::BeefyVersionedFinalityProofSender, }; /// A block-import handler for BEEFY. diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index ad527b2929585..7407f101e99a5 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -17,10 +17,12 @@ // along with this program. If not, see . 
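The error path of `next()` above walks the cached peer list until a live peer is found, so a cache entry that disconnected after `reset_peers_cache_for_block` is simply skipped. A toy model of that walk, with peers reduced to integers:

use std::collections::VecDeque;

// Pop cached peers front-to-back, skipping any that have since disconnected.
fn try_next_peer(cache: &mut VecDeque<u32>, live: &[u32]) -> Option<u32> {
    while let Some(peer) = cache.pop_front() {
        if live.contains(&peer) {
            return Some(peer)
        }
    }
    None
}

fn main() {
    let mut cache: VecDeque<u32> = [1, 2, 3].into_iter().collect();
    let live = [2, 3]; // peer 1 disconnected after the cache was built
    assert_eq!(try_next_peer(&mut cache, &live), Some(2));
    assert_eq!(try_next_peer(&mut cache, &live), Some(3));
    // Cache exhausted: the engine gives up with "ran out of peers".
    assert_eq!(try_next_peer(&mut cache, &live), None);
}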
use beefy_primitives::{BeefyApi, MmrRootHash}; +use parking_lot::Mutex; use prometheus::Registry; -use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; use sc_consensus::BlockImport; use sc_network::ProtocolName; +use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -28,68 +30,38 @@ use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::Block; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; mod error; -mod gossip; mod keystore; mod metrics; mod round; mod worker; +pub mod communication; pub mod import; pub mod justification; -pub mod notification; #[cfg(test)] mod tests; use crate::{ - import::BeefyBlockImport, - notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, - BeefyVersionedFinalityProofStream, + communication::{ + notification::{ + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, + }, + peers::KnownPeers, + request_response::{ + outgoing_requests_engine::OnDemandJustificationsEngine, BeefyJustifsRequestHandler, + }, }, + import::BeefyBlockImport, }; -pub use beefy_protocol_name::standard_name as protocol_standard_name; - -pub(crate) mod beefy_protocol_name { - use sc_chain_spec::ChainSpec; - use sc_network::ProtocolName; - - const NAME: &str = "/beefy/1"; - /// Old names for the notifications protocol, used for backward compatibility. - pub(crate) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - - /// Name of the notifications protocol used by BEEFY. - /// - /// Must be registered towards the networking in order for BEEFY to properly function. - pub fn standard_name>( - genesis_hash: &Hash, - chain_spec: &Box, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); - let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), - None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), - }; - format!("{}{}", chain_prefix, NAME).into() - } -} - -/// Returns the configuration value to put in -/// [`sc_network::config::NetworkConfiguration::extra_sets`]. -/// For standard protocol name see [`beefy_protocol_name::standard_name`]. -pub fn beefy_peers_set_config( - protocol_name: ProtocolName, -) -> sc_network_common::config::NonDefaultSetConfig { - let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); - - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); - cfg -} +pub use communication::beefy_protocol_name::{ + gossip_protocol_name, justifications_protocol_name as justifs_protocol_name, +}; /// A convenience BEEFY client trait that defines all the type bounds a BEEFY client /// has to satisfy. Ideally that should actually be a trait alias. 
Unfortunately as @@ -159,13 +131,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = - notification::BeefyBestBlockStream::::channel(); + BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); // BlockImport let import = @@ -180,6 +152,24 @@ where (import, voter_links, rpc_links) } +/// BEEFY gadget network parameters. +pub struct BeefyNetworkParams +where + B: Block, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, +{ + /// Network implementing gossip, requests and sync-oracle. + pub network: Arc, + /// Chain specific BEEFY gossip protocol name. See + /// [`communication::beefy_protocol_name::gossip_protocol_name`]. + pub gossip_protocol_name: ProtocolName, + /// Chain specific BEEFY on-demand justifications protocol name. See + /// [`communication::beefy_protocol_name::justifications_protocol_name`]. + pub justifications_protocol_name: ProtocolName, + + pub _phantom: PhantomData, +} + /// BEEFY gadget initialization parameters. pub struct BeefyParams where @@ -188,7 +178,7 @@ where C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { /// BEEFY client pub client: Arc, @@ -198,16 +188,16 @@ where pub runtime: Arc, /// Local key store pub key_store: Option, - /// Gossip network - pub network: N, + /// BEEFY voter network params + pub network_params: BeefyNetworkParams, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry pub prometheus_registry: Option, - /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. - pub protocol_name: ProtocolName, /// Links between the block importer, the background voter and the RPC layer. pub links: BeefyVoterLinks, + /// Handler for incoming BEEFY justifications requests from a remote peer. + pub on_demand_justifications_handler: BeefyJustifsRequestHandler, } /// Start the BEEFY gadget. @@ -217,32 +207,43 @@ pub async fn start_beefy_gadget(beefy_params: BeefyParams, - C: Client, + C: Client + BlockBackend, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { let BeefyParams { client, backend, runtime, key_store, - network, + network_params, min_block_delta, prometheus_registry, - protocol_name, links, + on_demand_justifications_handler, } = beefy_params; - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(gossip::GossipValidator::new()); + let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. 
} = + network_params; + + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = + Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); let gossip_engine = sc_network_gossip::GossipEngine::new( - network, - protocol_name, + network.clone(), + gossip_protocol_name, gossip_validator.clone(), None, ); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + runtime.clone(), + justifications_protocol_name, + known_peers.clone(), + ); + let metrics = prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( |result| match result { @@ -261,10 +262,12 @@ where client, backend, runtime, - sync_oracle, + network, key_store: key_store.into(), + known_peers, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, min_block_delta, @@ -272,5 +275,5 @@ where let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); - worker.run().await + futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; } diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index c96613eb38a95..45d346ccd85eb 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -33,7 +33,7 @@ use sp_runtime::traits::{Block, NumberFor}; /// whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). -#[derive(Default)] +#[derive(Debug, Default)] struct RoundTracker { self_vote: bool, votes: HashMap, @@ -69,6 +69,7 @@ pub fn threshold(authorities: usize) -> usize { /// Only round numbers > `best_done` are of interest, all others are considered stale. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
+#[derive(Debug)] pub(crate) struct Rounds { rounds: BTreeMap<(Payload, NumberFor), RoundTracker>, session_start: NumberFor, @@ -135,7 +136,7 @@ where } } - pub(crate) fn try_conclude( + pub(crate) fn should_conclude( &mut self, round: &(P, NumberFor), ) -> Option>> { @@ -148,7 +149,6 @@ where if done { let signatures = self.rounds.remove(round)?.votes; - self.conclude(round.1); Some( self.validators() .iter() @@ -279,7 +279,7 @@ mod tests { true )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // self vote already present, should not self vote assert!(!rounds.should_self_vote(&round)); @@ -296,7 +296,7 @@ mod tests { (Keyring::Dave.public(), Keyring::Dave.sign(b"I am committed")), false )); - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 2nd good vote assert!(rounds.add_vote( @@ -305,7 +305,7 @@ mod tests { false )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 3rd good vote assert!(rounds.add_vote( @@ -314,7 +314,8 @@ mod tests { false )); // round concluded - assert!(rounds.try_conclude(&round).is_some()); + assert!(rounds.should_conclude(&round).is_some()); + rounds.conclude(round.1); // Eve is a validator, but round was concluded, adding vote disallowed assert!(!rounds.add_vote( @@ -432,11 +433,12 @@ mod tests { assert_eq!(3, rounds.rounds.len()); // conclude unknown round - assert!(rounds.try_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); + assert!(rounds.should_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); assert_eq!(3, rounds.rounds.len()); // conclude round 2 - let signatures = rounds.try_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + let signatures = rounds.should_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + rounds.conclude(2); assert_eq!(1, rounds.rounds.len()); assert_eq!( diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 3e49f4e05cc91..8057bd7cab7a5 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,10 +21,9 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, sync::Arc, task::Poll}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; -use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, @@ -33,7 +32,7 @@ use sc_consensus::{ use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, - TestNetFactory, + PeersFullClient, TestNetFactory, }; use sc_utils::notification::NotificationReceiver; @@ -42,6 +41,7 @@ use beefy_primitives::{ BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; +use sc_network::{config::RequestResponseConfig, ProtocolName}; use sp_mmr_primitives::{ BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, }; @@ -60,11 +60,21 @@ use sp_runtime::{ use substrate_test_runtime_client::{runtime::Header, ClientExt}; use crate::{ - beefy_block_import_and_links, beefy_protocol_name, justification::*, - keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, + 
beefy_block_import_and_links, + communication::request_response::{ + on_demand_justifications_protocol_config, BeefyJustifsRequestHandler, + }, + gossip_protocol_name, + justification::*, + keystore::tests::Keyring as BeefyKeyring, + BeefyRPCLinks, BeefyVoterLinks, }; -pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; +const GENESIS_HASH: H256 = H256::zero(); +fn beefy_gossip_proto_name() -> ProtocolName { + gossip_protocol_name(GENESIS_HASH, None) +} + const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); @@ -89,35 +99,12 @@ impl BuildStorage for Genesis { } } -#[test] -fn beefy_protocol_name() { - let chain_spec = GenericChainSpec::::from_json_bytes( - &include_bytes!("../../chain-spec/res/chain_spec.json")[..], - ) - .unwrap() - .cloned_box(); - - // Create protocol name using random genesis hash. - let genesis_hash = H256::random(); - let expected = format!("/{}/beefy/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); - - // Create protocol name using hardcoded genesis hash. Verify exact representation. - let genesis_hash = [ - 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, 94, - 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, - ]; - let expected = - "/32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93/beefy/1".to_string(); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); -} - #[derive(Default)] pub(crate) struct PeerData { pub(crate) beefy_rpc_links: Mutex>>, pub(crate) beefy_voter_links: Mutex>>, + pub(crate) beefy_justif_req_handler: + Mutex>>, } #[derive(Default)] @@ -126,23 +113,34 @@ pub(crate) struct BeefyTestNet { } impl BeefyTestNet { - pub(crate) fn new(n_authority: usize, n_full: usize) -> Self { - let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority + n_full) }; - for _ in 0..n_authority { - net.add_authority_peer(); - } - for _ in 0..n_full { - net.add_full_peer(); + pub(crate) fn new(n_authority: usize) -> Self { + let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority) }; + + for i in 0..n_authority { + let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None); + let justif_protocol_name = cfg.name.clone(); + + net.add_authority_peer(vec![cfg]); + + let client = net.peers[i].client().as_client(); + let justif_handler = BeefyJustifsRequestHandler { + request_receiver: rx, + justif_protocol_name, + client, + _block: PhantomData, + }; + *net.peers[i].data.beefy_justif_req_handler.lock() = Some(justif_handler); } net } - pub(crate) fn add_authority_peer(&mut self) { + pub(crate) fn add_authority_peer(&mut self, req_resp_cfgs: Vec) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], + notifications_protocols: vec![beefy_gossip_proto_name()], + request_response_protocols: req_resp_cfgs, is_authority: true, ..Default::default() - }) + }); } pub(crate) fn generate_blocks_and_sync( @@ -198,6 +196,7 @@ impl TestNetFactory for BeefyTestNet { let peer_data = PeerData { beefy_rpc_links: Mutex::new(Some(rpc_links)), beefy_voter_links: Mutex::new(Some(voter_links)), + ..Default::default() }; (BlockImportAdapter::new(block_import), None, peer_data) } @@ -215,11 +214,8 @@ impl TestNetFactory for BeefyTestNet { } fn 
add_full_peer(&mut self) { - self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], - is_authority: false, - ..Default::default() - }) + // `add_authority_peer()` used instead. + unimplemented!() } } @@ -354,7 +350,7 @@ where API: ProvideRuntimeApi + Default + Sync + Send, API::Api: BeefyApi + MmrApi, { - let voters = FuturesUnordered::new(); + let tasks = FuturesUnordered::new(); for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -362,31 +358,40 @@ where let keystore = create_beefy_keystore(*key); let (_, _, peer_data) = net.make_block_import(peer.client().clone()); - let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; + let PeerData { beefy_rpc_links, beefy_voter_links, .. } = peer_data; let beefy_voter_links = beefy_voter_links.lock().take(); *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); + let on_demand_justif_handler = peer.data.beefy_justif_req_handler.lock().take().unwrap(); + + let network_params = crate::BeefyNetworkParams { + network: peer.network_service().clone(), + gossip_protocol_name: beefy_gossip_proto_name(), + justifications_protocol_name: on_demand_justif_handler.protocol_name(), + _phantom: PhantomData, + }; + let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api.clone(), key_store: Some(keystore), - network: peer.network_service().clone(), + network_params, links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, - protocol_name: BEEFY_PROTOCOL_NAME.into(), + on_demand_justifications_handler: on_demand_justif_handler, }; - let gadget = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} - assert_send(&gadget); - voters.push(gadget); + assert_send(&task); + tasks.push(task); } - voters.for_each(|_| async move {}) + tasks.for_each(|_| async move {}) } fn block_until(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { @@ -404,18 +409,19 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator, ) -> (Vec>, Vec>>) { let mut best_block_streams = Vec::new(); let mut versioned_finality_proof_streams = Vec::new(); - for peer_id in 0..peers.len() { - let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); + peers.for_each(|(index, _)| { + let beefy_rpc_links = net.peer(index).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); - } + }); (best_block_streams, versioned_finality_proof_streams) } @@ -493,18 +499,24 @@ fn streams_empty_after_timeout( fn finalize_block_and_wait_for_beefy( net: &Arc>, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator + Clone, runtime: &mut Runtime, finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); for block in finalize_targets { let finalize = 
BlockId::number(*block); - for i in 0..peers.len() { - net.lock().peer(i).client().as_client().finalize_block(finalize, None).unwrap(); - } + peers.clone().for_each(|(index, _)| { + net.lock() + .peer(index) + .client() + .as_client() + .finalize_block(finalize, None) + .unwrap(); + }) } if expected_beefy.is_empty() { @@ -524,12 +536,12 @@ fn beefy_finalizing_blocks() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 10; let min_block_delta = 4; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); @@ -542,17 +554,18 @@ fn beefy_finalizing_blocks() { // Minimum BEEFY block delta is 4. + let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[5], &[1, 5]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[5], &[1, 5]); // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[10]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[10], &[10]); // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[18], &[14, 18]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[18], &[14, 18]); // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[20], &[20]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[20], &[20]); // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[21], &[]); @@ -563,12 +576,12 @@ fn lagging_validators() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); @@ -578,13 +591,20 @@ fn lagging_validators() { let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two rule. 
-	finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[15], &[1, 9, 13, 14, 15]);
+	finalize_block_and_wait_for_beefy(
+		&net,
+		peers.clone(),
+		&mut runtime,
+		&[15],
+		&[1, 9, 13, 14, 15],
+	);
 
 	// Alice finalizes #25, Bob lags behind
 	let finalize = BlockId::number(25);
-	let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers);
+	let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone());
 	net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap();
 	// verify nothing gets finalized by BEEFY
 	let timeout = Some(Duration::from_millis(250));
@@ -592,21 +612,21 @@
 	streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None);
 
 	// Bob catches up and also finalizes #25
-	let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers);
+	let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone());
 	net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap();
 	// expected BEEFY to finalize blocks #23, #24 and #25 (diff-power-of-two rule, starting from #15)
 	wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]);
 	wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]);
 
 	// Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32
-	finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]);
+	finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[30, 32], &[30, 31, 32]);
 
 	// Verify that session-boundary votes get buffered by client and only processed once
 	// session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session
 	// validator set).
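The block numbers asserted throughout these tests follow the diff-power-of-two rule the comments keep referring to. A simplified model that reproduces them, assuming the next target after the session's mandatory block is `best_beefy + max(min_block_delta, next_power_of_two((best_grandpa - best_beefy + 1) / 2))`; this is a sketch of the rule only, not the voter's actual implementation, and mandatory session blocks are targeted separately:

fn vote_target(best_grandpa: u64, best_beefy: u64, min_delta: u64) -> Option<u64> {
    let diff = (best_grandpa.saturating_sub(best_beefy) + 1) / 2;
    let target = best_beefy + min_delta.max(diff.next_power_of_two());
    // Never target a block GRANDPA has not finalized yet.
    (target <= best_grandpa).then_some(target)
}

fn main() {
    // GRANDPA at #15, BEEFY at #1 (mandatory block already done), min delta 1:
    // expect votes on #9, #13, #14, #15, as asserted at the top of this test.
    let (mut best, mut voted) = (1u64, Vec::new());
    while let Some(target) = vote_target(15, best, 1) {
        voted.push(target);
        best = target;
    }
    assert_eq!(voted, vec![9, 13, 14, 15]);
}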
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -617,7 +637,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); - // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 + // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } @@ -627,13 +647,12 @@ fn correct_beefy_payload() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = - &[BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 20; let min_block_delta = 2; - let mut net = BeefyTestNet::new(4, 0); + let mut net = BeefyTestNet::new(4); // Alice, Bob, Charlie will vote on good payloads let good_api = Arc::new(four_validators::TestApi {}); @@ -649,15 +668,16 @@ fn correct_beefy_payload() { let bad_peers = vec![(3, &BeefyKeyring::Dave, bad_api)]; runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); - // push 10 blocks + // push 12 blocks net.generate_blocks_and_sync(12, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting net.lock() @@ -686,7 +706,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); net.lock() .peer(2) .client() @@ -707,11 +727,11 @@ fn beefy_importing_blocks() { sp_tracing::try_init_simple(); - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let client = net.peer(0).client().clone(); let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); - let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; + let PeerData { beefy_voter_links, .. } = peer_data; let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; let params = |block: Block, justifications: Option| { @@ -826,18 +846,18 @@ fn voter_initialization() { // after waiting for BEEFY pallet availability. 
let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 5; // Should vote on all mandatory blocks no matter the `min_block_delta`. let min_block_delta = 10; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - // push 30 blocks + // push 26 blocks net.generate_blocks_and_sync(26, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); @@ -846,9 +866,90 @@ fn voter_initialization() { // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. finalize_block_and_wait_for_beefy( &net, - peers, + peers.into_iter().enumerate(), &mut runtime, &[1, 6, 10, 17, 24, 26], &[1, 5, 10, 15, 20, 25], ); } + +#[test] +fn on_demand_beefy_justification_sync() { + sp_tracing::try_init_simple(); + + let mut runtime = Runtime::new().unwrap(); + let all_peers = + [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), 0).unwrap(); + let session_len = 5; + let min_block_delta = 5; + + let mut net = BeefyTestNet::new(4); + + // Alice, Bob, Charlie start first and make progress through voting. + let api = Arc::new(four_validators::TestApi {}); + let fast_peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; + let voting_peers = + fast_peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); + runtime.spawn(initialize_beefy(&mut net, voting_peers, min_block_delta)); + + // Dave will start late and have to catch up using on-demand justification requests (since + // in this test there is no block import queue to automatically import justifications). + let dave = vec![(3, &BeefyKeyring::Dave, api)]; + // Instantiate but don't run Dave, yet. + let dave_task = initialize_beefy(&mut net, dave, min_block_delta); + let dave_index = 3; + + // push 30 blocks + net.generate_blocks_and_sync(30, session_len, &validator_set, false); + + let fast_peers = fast_peers.into_iter().enumerate(); + let net = Arc::new(Mutex::new(net)); + // With 3 active voters and one inactive, consensus should happen and blocks BEEFY-finalized. + // Need to finalize at least one block in each session, choose randomly. + finalize_block_and_wait_for_beefy( + &net, + fast_peers.clone(), + &mut runtime, + &[1, 6, 10, 17, 24], + &[1, 5, 10, 15, 20], + ); + + // Spawn Dave, he's now way behind voting and can only catch up through on-demand justif sync. + runtime.spawn(dave_task); + // give Dave a chance to spawn and init. + run_for(Duration::from_millis(400), &net, &mut runtime); + + let (dave_best_blocks, _) = + get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); + net.lock() + .peer(dave_index) + .client() + .as_client() + .finalize_block(BlockId::number(1), None) + .unwrap(); + // Give Dave task some cpu cycles to process the finality notification, + run_for(Duration::from_millis(100), &net, &mut runtime); + // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. 
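Dave's catch-up here hinges on the `mandatory_pending()` helper added to the worker further down in this patch: as long as the oldest tracked session still lacks its mandatory justification, that session's start block keeps being requested on demand. A toy model of that bookkeeping, with sessions reduced to a start number and a done flag:

use std::collections::VecDeque;

struct Session {
    start: u64,
    mandatory_done: bool,
}

// Oldest session first; its start block stays pending until the proof arrives.
fn mandatory_pending(sessions: &VecDeque<Session>) -> Option<u64> {
    sessions.front().and_then(|s| (!s.mandatory_done).then_some(s.start))
}

fn main() {
    let mut sessions = VecDeque::new();
    sessions.push_back(Session { start: 1, mandatory_done: false });
    sessions.push_back(Session { start: 6, mandatory_done: false });
    // Block #1 keeps being requested on demand until its proof is obtained.
    assert_eq!(mandatory_pending(&sessions), Some(1));
    sessions.front_mut().unwrap().mandatory_done = true;
    assert_eq!(mandatory_pending(&sessions), None);
    // Once the concluded session is dropped, the next mandatory block surfaces.
    sessions.pop_front();
    assert_eq!(mandatory_pending(&sessions), Some(6));
}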
+ + // Have the other peers do some gossip so Dave finds out about their progress. + finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[25], &[25]); + + // Now verify Dave successfully finalized #1 (through on-demand justification request). + wait_for_best_beefy_blocks(dave_best_blocks, &net, &mut runtime, &[1]); + + // Give Dave all tasks some cpu cycles to burn through their events queues, + run_for(Duration::from_millis(100), &net, &mut runtime); + // then verify Dave catches up through on-demand justification requests. + finalize_block_and_wait_for_beefy( + &net, + [(dave_index, BeefyKeyring::Dave)].into_iter(), + &mut runtime, + &[6, 10, 17, 24, 26], + &[5, 10, 15, 20, 25], + ); + + let all_peers = all_peers.into_iter().enumerate(); + // Now that Dave has caught up, sanity check voting works for all of them. + finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[30], &[30]); +} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 6e8c89d804984..832b43315515f 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -24,10 +24,15 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::{stream::Fuse, StreamExt}; +use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; +use parking_lot::Mutex; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; +use sc_network_common::{ + protocol::event::Event as NetEvent, + service::{NetworkEventStream, NetworkRequest}, +}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -48,14 +53,17 @@ use beefy_primitives::{ }; use crate::{ + communication::{ + gossip::{topic, GossipValidator}, + request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + }, error::Error, - gossip::{topic, GossipValidator}, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::Metrics, round::Rounds, - BeefyVoterLinks, Client, + BeefyVoterLinks, Client, KnownPeers, }; enum RoundAction { @@ -113,6 +121,17 @@ impl VoterOracle { } } + /// Return current pending mandatory block, if any. + pub fn mandatory_pending(&self) -> Option> { + self.sessions.front().and_then(|round| { + if round.mandatory_done() { + None + } else { + Some(round.session_start()) + } + }) + } + /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept. pub fn accepted_interval( &self, @@ -175,29 +194,35 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, pub runtime: Arc, - pub sync_oracle: SO, + pub network: N, pub key_store: BeefyKeystore, + pub known_peers: Arc>>, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, + pub on_demand_justifications: OnDemandJustificationsEngine, pub links: BeefyVoterLinks, pub metrics: Option, pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, runtime: Arc, - sync_oracle: SO, + network: N, key_store: BeefyKeystore, + + // communication + known_peers: Arc>>, gossip_engine: GossipEngine, gossip_validator: Arc>, + on_demand_justifications: OnDemandJustificationsEngine, // channels /// Links between the block importer, the background voter and the RPC layer. 
@@ -218,14 +243,14 @@ pub(crate) struct BeefyWorker { voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - SO: SyncOracle + Send + Sync + Clone + 'static, + N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. /// @@ -233,15 +258,17 @@ where /// BEEFY pallet has been deployed on-chain. /// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. - pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, runtime, key_store, - sync_oracle, + network, gossip_engine, gossip_validator, + on_demand_justifications, + known_peers, links, metrics, min_block_delta, @@ -256,10 +283,12 @@ where client: client.clone(), backend, runtime, - sync_oracle, + network, + known_peers, key_store, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, best_grandpa_block_header: last_finalized_header, @@ -366,8 +395,6 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); - // TODO (grandpa-bridge-gadget/issues/20): when adding SYNC protocol, - // fire up a request for justification for this mandatory block here. } } } @@ -408,7 +435,10 @@ where let block_num = signed_commitment.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { - RoundAction::Process => self.finalize(justification)?, + RoundAction::Process => { + debug!(target: "beefy", "🥩 Process justification for round: {:?}.", block_num); + self.finalize(justification)? + }, RoundAction::Enqueue => { debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num); self.pending_justifications.entry(block_num).or_insert(justification); @@ -429,7 +459,7 @@ where let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if rounds.add_vote(&round, vote, self_vote) { - if let Some(signatures) = rounds.try_conclude(&round) { + if let Some(signatures) = rounds.should_conclude(&round) { self.gossip_validator.conclude_round(round.1); let block_num = round.1; @@ -474,6 +504,8 @@ where self.best_beefy_block = Some(block_num); metric_set!(self, beefy_best_block, block_num); + self.on_demand_justifications.cancel_requests_older_than(block_num); + if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), (BEEFY_ENGINE_ID, finality_proof.clone().encode()), @@ -735,7 +767,7 @@ where let at = BlockId::hash(notif.header.hash()); if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { self.initialize_voter(¬if.header, active); - if !self.sync_oracle.is_major_syncing() { + if !self.network.is_major_syncing() { if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); } @@ -768,6 +800,7 @@ where self.wait_for_runtime_pallet(&mut finality_notifications).await; trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); + let mut network_events = self.network.event_stream("network-gossip").fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) @@ -788,15 +821,38 @@ where // The branches below only change 'state', actual voting happen afterwards, // based on the new resulting 'state'. futures::select_biased! 
{ + // Use `select_biased!` to prioritize order below. + // Make sure to pump gossip engine. + _ = gossip_engine => { + error!(target: "beefy", "🥩 Gossip engine has terminated, closing worker."); + return; + }, + // Keep track of connected peers. + net_event = network_events.next() => { + if let Some(net_event) = net_event { + self.handle_network_event(net_event); + } else { + error!(target: "beefy", "🥩 Network events stream terminated, closing worker."); + return; + } + }, + // Process finality notifications first since these drive the voter. notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); } else { + error!(target: "beefy", "🥩 Finality stream terminated, closing worker."); return; } }, - // TODO: when adding SYNC protocol, join the on-demand justifications stream to - // this one, and handle them both here. + // Process incoming justifications as these can make some in-flight votes obsolete. + justif = self.on_demand_justifications.next().fuse() => { + if let Some(justif) = justif { + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: "beefy", "🥩 {}", err); + } + } + }, justif = block_import_justif.next() => { if let Some(justif) = justif { // Block import justifications have already been verified to be valid @@ -805,9 +861,11 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Block import stream terminated, closing worker."); return; } }, + // Finally process incoming votes. vote = votes.next() => { if let Some(vote) = vote { // Votes have already been verified to be valid by the gossip validator. @@ -815,13 +873,10 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Votes gossiping stream terminated, closing worker."); return; } }, - _ = gossip_engine => { - error!(target: "beefy", "🥩 Gossip engine has terminated."); - return; - } } // Handle pending justifications and/or votes for now GRANDPA finalized blocks. @@ -829,8 +884,14 @@ where debug!(target: "beefy", "🥩 {}", err); } - // Don't bother voting during major sync. - if !self.sync_oracle.is_major_syncing() { + // Don't bother voting or requesting justifications during major sync. + if !self.network.is_major_syncing() { + // If the current target is a mandatory block, + // make sure there's also an on-demand justification request out for it. + if let Some(block) = self.voting_oracle.mandatory_pending() { + // This only starts new request if there isn't already an active one. + self.on_demand_justifications.request(block); + } // There were external events, 'state' is changed, author a vote if needed/possible. if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); @@ -840,6 +901,20 @@ where } } } + + /// Update known peers based on network events. + fn handle_network_event(&mut self, event: NetEvent) { + match event { + NetEvent::SyncConnected { remote } => { + self.known_peers.lock().add_new(remote); + }, + NetEvent::SyncDisconnected { remote } => { + self.known_peers.lock().remove(&remote); + }, + // We don't care about other events. + _ => (), + } + } } /// Extract the MMR root hash from a digest in the given header, if it exists. 
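The ordering in the event loop above is load-bearing: `select_biased!` polls its arms strictly top to bottom, so the gossip engine pump, network events and finality notifications are always serviced before justifications and votes when several sources are ready at once. A minimal, self-contained sketch of that ordering guarantee, using only the `futures` crate (illustrative only, not part of this patch):

use futures::{executor::block_on, future, select_biased, FutureExt};

fn main() {
    block_on(async {
        // Both arms are ready at the same time; `select_biased!` always
        // resolves to the first listed arm, unlike `select!`, which picks
        // among ready arms pseudo-randomly.
        let mut high = future::ready("finality notification").fuse();
        let mut low = future::ready("gossiped vote").fuse();
        let first_served = select_biased! {
            msg = high => msg,
            msg = low => msg,
        };
        assert_eq!(first_served, "finality notification");
    });
}
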
@@ -932,11 +1007,11 @@ where pub(crate) mod tests { use super::*; use crate::{ + communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, - BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, + BeefyPeer, BeefyTestNet, }, BeefyRPCLinks, }; @@ -979,21 +1054,29 @@ pub(crate) mod tests { let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(crate::gossip::GossipValidator::new()); + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone())); let gossip_engine = - GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); + GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + api.clone(), + "/beefy/justifs/1".into(), + known_peers.clone(), + ); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api, key_store: Some(keystore).into(), + known_peers, links, gossip_engine, gossip_validator, min_block_delta, metrics: None, - sync_oracle, + network, + on_demand_justifications, }; BeefyWorker::<_, _, _, _, _>::new(worker_params) } @@ -1245,7 +1328,7 @@ pub(crate) mod tests { fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); // keystore doesn't contain other keys than validators' @@ -1266,13 +1349,15 @@ pub(crate) mod tests { #[test] fn should_finalize_correctly() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let keys = [Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let keys = keys.iter().cloned().enumerate(); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); @@ -1294,7 +1379,8 @@ pub(crate) mod tests { })); // unknown hash for block #1 - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); let justif = create_finality_proof(1); @@ -1355,7 +1441,7 @@ pub(crate) mod tests { fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), 
&keys[0], 1);

 		assert!(worker.voting_oracle.sessions.is_empty());

@@ -1389,7 +1475,7 @@
 	fn should_triage_votes_and_process_later() {
 		let keys = &[Keyring::Alice, Keyring::Bob];
 		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap();
-		let mut net = BeefyTestNet::new(1, 0);
+		let mut net = BeefyTestNet::new(1);
 		let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1);

 		fn new_vote(
@@ -1450,7 +1536,7 @@
 	fn should_initialize_correct_voter() {
 		let keys = &[Keyring::Alice];
 		let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap();
-		let mut net = BeefyTestNet::new(1, 0);
+		let mut net = BeefyTestNet::new(1);
 		let backend = net.peer(0).client().as_backend();

 		// push 15 blocks with `AuthorityChange` digests every 10 blocks
diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs
index 2f6b788e368b3..9d5abf98ceff0 100644
--- a/client/network/test/src/lib.rs
+++ b/client/network/test/src/lib.rs
@@ -48,7 +48,7 @@ use sc_consensus::{
 	Verifier,
 };
 use sc_network::{
-	config::{NetworkConfiguration, Role, SyncMode},
+	config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode},
 	Multiaddr, NetworkService, NetworkWorker,
 };
 use sc_network_common::{
@@ -688,6 +688,8 @@ pub struct FullPeerConfig {
 	pub block_announce_validator: Option<Box<dyn BlockAnnounceValidator<Block> + Send + Sync>>,
 	/// List of notification protocols that the network must support.
 	pub notifications_protocols: Vec<ProtocolName>,
+	/// List of request-response protocols that the network must support.
+	pub request_response_protocols: Vec<RequestResponseConfig>,
 	/// The indices of the peers the peer should be connected to.
 	///
 	/// If `None`, it will be connected to all other peers.
@@ -790,6 +792,9 @@ where
 		network_config.transport = TransportConfig::MemoryOnly;
 		network_config.listen_addresses = vec![listen_addr.clone()];
 		network_config.allow_non_globals_in_dht = true;
+		network_config
+			.request_response_protocols
+			.extend(config.request_response_protocols);
 		network_config.extra_sets = config
 			.notifications_protocols
 			.into_iter()

From 25795506052363e8b5795eb3526e61ef2a27d89a Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Mon, 3 Oct 2022 15:17:59 +0200
Subject: [PATCH 30/75] Fix `Weight::is_zero` (#12396)

* Fix Weight::is_zero

Signed-off-by: Oliver Tale-Yazdi

* Add test

Signed-off-by: Oliver Tale-Yazdi

Signed-off-by: Oliver Tale-Yazdi
---
 primitives/weights/src/weight_v2.rs | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs
index a8eaf79a28711..8596a782c1fa7 100644
--- a/primitives/weights/src/weight_v2.rs
+++ b/primitives/weights/src/weight_v2.rs
@@ -318,7 +318,7 @@ impl Zero for Weight {
 	}

 	fn is_zero(&self) -> bool {
-		self.ref_time == 0
+		self == &Self::zero()
 	}
 }

@@ -447,3 +447,16 @@ impl SubAssign for Weight {
 		};
 	}
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn is_zero_works() {
+		assert!(Weight::zero().is_zero());
+		assert!(!Weight::from_components(1, 0).is_zero());
+		assert!(!Weight::from_components(0, 1).is_zero());
+		assert!(!Weight::MAX.is_zero());
+	}
+}

From 1b23ec9f6d7880b358072b97d0030d3352cb20aa Mon Sep 17 00:00:00 2001
From: Keith Yeung
Date: Mon, 3 Oct 2022 23:50:00 +0800
Subject: [PATCH 31/75] Remove unnecessary Clone trait bounds on CountedStorageMap (#12402)

* Remove unnecessary Clone trait bounds on CountedStorageMap

* cargo fmt
---
 .../support/src/storage/types/counted_map.rs | 21 ++++++++-----------
 1 file changed, 9 insertions(+), 12 deletions(-)
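The `Clone` bounds dropped in the diff below were never really needed: each method only inspects the key a second time before consuming it, and `Ref::from(&key)` already lets the underlying map encode a borrowed key. A toy sketch of the same borrow-then-consume pattern in plain Rust (the `CountedSet` type here is made up for illustration and is not the actual `CountedStorageMap` API):

use std::collections::HashSet;

/// Toy counted container: tracks how many distinct keys it holds.
struct CountedSet {
    keys: HashSet<String>,
    counter: usize,
}

impl CountedSet {
    /// Takes `key` by value, yet needs no `Clone` bound: the membership
    /// check goes through a borrow, and the key is only consumed by the
    /// final insert.
    fn insert(&mut self, key: String) {
        if !self.keys.contains(&key) {
            self.counter += 1;
        }
        self.keys.insert(key);
    }
}

fn main() {
    let mut set = CountedSet { keys: HashSet::new(), counter: 0 };
    set.insert("alice".to_string());
    set.insert("alice".to_string());
    assert_eq!(set.counter, 1);
}
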
diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs
index c4027acfe7232..8c19434767f49 100644
--- a/frame/support/src/storage/types/counted_map.rs
+++ b/frame/support/src/storage/types/counted_map.rs
@@ -143,10 +143,7 @@ where
 	}

 	/// Store a value to be associated with the given key from the map.
-	pub fn insert<KeyArg: EncodeLike<Key> + Clone, ValArg: EncodeLike<Value>>(
-		key: KeyArg,
-		val: ValArg,
-	) {
+	pub fn insert<KeyArg: EncodeLike<Key>, ValArg: EncodeLike<Value>>(key: KeyArg, val: ValArg) {
 		if !<Self as MapWrapper>::Map::contains_key(Ref::from(&key)) {
 			CounterFor::<Prefix>::mutate(|value| value.saturating_inc());
 		}
@@ -154,7 +151,7 @@
 	}

 	/// Remove the value under a key.
-	pub fn remove<KeyArg: EncodeLike<Key> + Clone>(key: KeyArg) {
+	pub fn remove<KeyArg: EncodeLike<Key>>(key: KeyArg) {
 		if <Self as MapWrapper>::Map::contains_key(Ref::from(&key)) {
 			CounterFor::<Prefix>::mutate(|value| value.saturating_dec());
 		}
@@ -162,7 +159,7 @@
 	}

 	/// Mutate the value under a key.
-	pub fn mutate<KeyArg: EncodeLike<Key> + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>(
+	pub fn mutate<KeyArg: EncodeLike<Key>, R, F: FnOnce(&mut QueryKind::Query) -> R>(
 		key: KeyArg,
 		f: F,
 	) -> R {
@@ -173,7 +170,7 @@
 	/// Mutate the item, only if an `Ok` value is returned.
 	pub fn try_mutate<KeyArg, R, E, F>(key: KeyArg, f: F) -> Result<R, E>
 	where
-		KeyArg: EncodeLike<Key> + Clone,
+		KeyArg: EncodeLike<Key>,
 		F: FnOnce(&mut QueryKind::Query) -> Result<R, E>,
 	{
 		Self::try_mutate_exists(key, |option_value_ref| {
@@ -187,7 +184,7 @@
 	}

 	/// Mutate the value under a key. Deletes the item if mutated to a `None`.
-	pub fn mutate_exists<KeyArg: EncodeLike<Key> + Clone, R, F: FnOnce(&mut Option<Value>) -> R>(
+	pub fn mutate_exists<KeyArg: EncodeLike<Key>, R, F: FnOnce(&mut Option<Value>) -> R>(
 		key: KeyArg,
 		f: F,
 	) -> R {
@@ -200,7 +197,7 @@
 	/// or if the storage item does not exist (`None`), independent of the `QueryType`.
 	pub fn try_mutate_exists<KeyArg, R, E, F>(key: KeyArg, f: F) -> Result<R, E>
 	where
-		KeyArg: EncodeLike<Key> + Clone,
+		KeyArg: EncodeLike<Key>,
 		F: FnOnce(&mut Option<Value>) -> Result<R, E>,
 	{
 		<Self as MapWrapper>::Map::try_mutate_exists(key, |option_value| {
@@ -222,7 +219,7 @@
 	}

 	/// Take the value under a key.
-	pub fn take<KeyArg: EncodeLike<Key> + Clone>(key: KeyArg) -> QueryKind::Query {
+	pub fn take<KeyArg: EncodeLike<Key>>(key: KeyArg) -> QueryKind::Query {
 		let removed_value = <Self as MapWrapper>::Map::mutate_exists(key, |value| value.take());
 		if removed_value.is_some() {
 			CounterFor::<Prefix>::mutate(|value| value.saturating_dec());
@@ -240,7 +237,7 @@
 	/// `[item]`. Any default value set for the storage item will be ignored on overwrite.
 	pub fn append<Item, EncodeLikeItem, EncodeLikeKey>(key: EncodeLikeKey, item: EncodeLikeItem)
 	where
-		EncodeLikeKey: EncodeLike<Key> + Clone,
+		EncodeLikeKey: EncodeLike<Key>,
 		Item: Encode,
 		EncodeLikeItem: EncodeLike<Item>,
 		Value: StorageAppend<Item>,
@@ -355,7 +352,7 @@ where
 	/// Is only available if `Value` of the storage implements [`StorageTryAppend`].
 	pub fn try_append<KArg, Item, EncodeLikeItem>(key: KArg, item: EncodeLikeItem) -> Result<(), ()>
 	where
-		KArg: EncodeLike<Key> + Clone,
+		KArg: EncodeLike<Key>,
 		Item: Encode,
 		EncodeLikeItem: EncodeLike<Item>,
 		Value: StorageTryAppend<Item>,

From 6d7f76b5de00d4d4fdc55596abe86beb7d55f0b3 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Tue, 4 Oct 2022 09:58:05 +0300
Subject: [PATCH 32/75] docs/CODEOWNERS: add @acatangiu as MMR owner (#12406)

---
 docs/CODEOWNERS | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS
index 0b9e6e7783058..cf2067d19450d 100644
--- a/docs/CODEOWNERS
+++ b/docs/CODEOWNERS
@@ -36,11 +36,13 @@
 /client/consensus/pow/ @sorpaas
 /primitives/consensus/pow/ @sorpaas

-# BEEFY
+# BEEFY, MMR
 /client/beefy/ @acatangiu
 /frame/beefy/ @acatangiu
 /frame/beefy-mmr/ @acatangiu
+/frame/merkle-mountain-range/ @acatangiu
 /primitives/beefy/ @acatangiu
+/primitives/merkle-mountain-range/ @acatangiu

 # Contracts
 /frame/contracts/ @athei

From 594d71afca8e70ed84297b01472bb1250d89ebd1 Mon Sep 17 00:00:00 2001
From: Koute
Date: Tue, 4 Oct 2022 17:01:50 +0900
Subject: [PATCH 33/75] Add @koute to `docs/CODEOWNERS` and update stale paths (#12408)

---
 docs/CODEOWNERS | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS
index cf2067d19450d..133ba7b094d43 100644
--- a/docs/CODEOWNERS
+++ b/docs/CODEOWNERS
@@ -24,8 +24,19 @@
 /.gitlab-ci.yml @paritytech/ci

 # Sandboxing capability of Substrate Runtime
-/primitives/sr-sandbox/ @pepyakin
-/primitives/core/src/sandbox.rs @pepyakin
+/primitives/sandbox/ @pepyakin @koute
+
+# WASM executor, low-level client <-> WASM interface and other WASM-related code
+/client/executor/ @koute
+/client/allocator/ @koute
+/primitives/wasm-interface/ @koute
+/primitives/runtime-interface/ @koute
+/primitives/panic-handler/ @koute
+/utils/wasm-builder/ @koute
+
+# Systems-related bits and bobs on the client side
+/client/sysinfo/ @koute
+/client/tracing/ @koute

 # GRANDPA, BABE, consensus stuff
 /frame/babe/ @andresilva

From e77cbe39c4d1bfd978bb03c686fc9f60d86a3d06 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Tue, 4 Oct 2022 11:47:13 +0300
Subject: [PATCH 34/75] BEEFY: Simplify hashing for pallet-beefy-mmr (#12393)

* beefy-mmr: reuse sp_runtime::traits::Keccak256
* beefy-mmr: use sp_runtime::traits::Hash for generating merkle proofs
* beefy-mmr: use sp_runtime::traits::Hash for validating merkle proofs
* beefy-mmr: remove primitives::Hasher and primitives::Hash
* fixes
* beefy-mmr: reduce the number of generic parameters for merkle_root()
* fix
* compute upper Vec capacity more accurately
---
 Cargo.lock                            |   2 +-
 frame/beefy-mmr/primitives/Cargo.toml |   8 +-
 frame/beefy-mmr/primitives/src/lib.rs | 236 ++++++++++++--------------
 frame/beefy-mmr/src/lib.rs            |  27 +--
 frame/beefy-mmr/src/mock.rs           |   3 +-
 5 files changed, 118 insertions(+), 158 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 723a09ee9a39f..2f0a2df0f101b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -525,7 +525,7 @@ dependencies = [
  "env_logger",
  "log",
  "sp-api",
- "tiny-keccak",
+ "sp-runtime",
 ]

 [[package]]
diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml
index 1aa2573c7f680..a097da0fc30fd 100644
--- a/frame/beefy-mmr/primitives/Cargo.toml
+++ b/frame/beefy-mmr/primitives/Cargo.toml
@@ -11,10 +11,10 @@ homepage = "https://substrate.io"

 [dependencies]
 array-bytes = { version = "4.1", optional = true }
 log = { version = "0.4", default-features = false, optional = true }
-tiny-keccak = { version = "2.0.2",
features = ["keccak"], optional = true } beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/beefy" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] array-bytes = "4.1" @@ -22,9 +22,9 @@ env_logger = "0.9" [features] debug = ["array-bytes", "log"] -default = ["debug", "keccak", "std"] -keccak = ["tiny-keccak"] +default = ["debug", "std"] std = [ "beefy-primitives/std", - "sp-api/std" + "sp-api/std", + "sp-runtime/std" ] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 38831d7914715..f56be8bcafe5b 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -25,88 +25,49 @@ //! compilation targets. //! //! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the -//! same [Hasher] as the inner nodes. +//! same hasher as the inner nodes. //! Inner nodes are created by concatenating child hashes and hashing again. The implementation //! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! //! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. -#[cfg(not(feature = "std"))] -extern crate alloc; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +pub use sp_runtime::traits::Keccak256; +use sp_runtime::{app_crypto::sp_core, sp_std, traits::Hash as HashT}; +use sp_std::{vec, vec::Vec}; use beefy_primitives::mmr::{BeefyAuthoritySet, BeefyNextAuthoritySet}; -/// Supported hashing output size. -/// -/// The size is restricted to 32 bytes to allow for a more optimised implementation. -pub type Hash = [u8; 32]; - -/// Generic hasher trait. -/// -/// Implement the function to support custom way of hashing data. -/// The implementation must return a [Hash](type@Hash) type, so only 32-byte output hashes are -/// supported. -pub trait Hasher { - /// Hash given arbitrary-length piece of data. - fn hash(data: &[u8]) -> Hash; -} - -#[cfg(feature = "keccak")] -mod keccak256 { - use tiny_keccak::{Hasher as _, Keccak}; - - /// Keccak256 hasher implementation. - pub struct Keccak256; - impl Keccak256 { - /// Hash given data. - pub fn hash(data: &[u8]) -> super::Hash { - ::hash(data) - } - } - impl super::Hasher for Keccak256 { - fn hash(data: &[u8]) -> super::Hash { - let mut keccak = Keccak::v256(); - keccak.update(data); - let mut output = [0_u8; 32]; - keccak.finalize(&mut output); - output - } - } -} -#[cfg(feature = "keccak")] -pub use keccak256::Keccak256; - /// Construct a root hash of a Binary Merkle Tree created from given leaves. /// /// See crate-level docs for details about Merkle Tree construction. /// /// In case an empty list of leaves is passed the function returns a 0-filled hash. 
-pub fn merkle_root(leaves: I) -> Hash +pub fn merkle_root(leaves: I) -> H::Output where - H: Hasher, - I: IntoIterator, - T: AsRef<[u8]>, + H: HashT, + H::Output: Default + AsRef<[u8]>, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); - merkelize::(iter, &mut ()) + let iter = leaves.into_iter().map(|l| ::hash(l.as_ref())); + merkelize::(iter, &mut ()).into() } -fn merkelize(leaves: I, visitor: &mut V) -> Hash +fn merkelize(leaves: I, visitor: &mut V) -> H::Output where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: Default + AsRef<[u8]>, + V: Visitor, + I: Iterator, { - let upper = Vec::with_capacity(leaves.size_hint().0); + let upper = Vec::with_capacity((leaves.size_hint().1.unwrap_or(0).saturating_add(1)) / 2); let mut next = match merkelize_row::(leaves, upper, visitor) { Ok(root) => return root, - Err(next) if next.is_empty() => return Hash::default(), + Err(next) if next.is_empty() => return H::Output::default(), Err(next) => next, }; - let mut upper = Vec::with_capacity((next.len() + 1) / 2); + let mut upper = Vec::with_capacity((next.len().saturating_add(1)) / 2); loop { visitor.move_up(); @@ -125,14 +86,14 @@ where /// /// The structure contains all necessary data to later on verify the proof and the leaf itself. #[derive(Debug, PartialEq, Eq)] -pub struct MerkleProof { +pub struct MerkleProof { /// Root hash of generated merkle tree. - pub root: Hash, + pub root: H, /// Proof items (does not contain the leaf hash, nor the root obviously). /// /// This vec contains all inner node hashes necessary to reconstruct the root hash given the /// leaf hash. - pub proof: Vec, + pub proof: Vec, /// Number of leaves in the original tree. /// /// This is needed to detect a case where we have an odd number of leaves that "get promoted" @@ -141,14 +102,14 @@ pub struct MerkleProof { /// Index of the leaf the proof is for (0-based). pub leaf_index: usize, /// Leaf content. - pub leaf: T, + pub leaf: L, } /// A trait of object inspecting merkle root creation. /// /// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified /// about tree traversal. -trait Visitor { +trait Visitor { /// We are moving one level up in the tree. fn move_up(&mut self); @@ -158,13 +119,13 @@ trait Visitor { /// The method will also visit the `root` hash (level 0). /// /// The `index` is an index of `left` item. - fn visit(&mut self, index: usize, left: &Option, right: &Option); + fn visit(&mut self, index: usize, left: &Option, right: &Option); } /// No-op implementation of the visitor. -impl Visitor for () { +impl Visitor for () { fn move_up(&mut self) {} - fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} } /// Construct a Merkle Proof for leaves given by indices. @@ -177,16 +138,17 @@ impl Visitor for () { /// # Panic /// /// The function will panic if given `leaf_index` is greater than the number of leaves. 
-pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where - H: Hasher, + H: HashT, + H::Output: Default + Copy + AsRef<[u8]>, I: IntoIterator, I::IntoIter: ExactSizeIterator, T: AsRef<[u8]>, { let mut leaf = None; let iter = leaves.into_iter().enumerate().map(|(idx, l)| { - let hash = H::hash(l.as_ref()); + let hash = ::hash(l.as_ref()); if idx == leaf_index { leaf = Some(l); } @@ -194,23 +156,23 @@ where }); /// The struct collects a proof for single leaf. - struct ProofCollection { - proof: Vec, + struct ProofCollection { + proof: Vec, position: usize, } - impl ProofCollection { + impl ProofCollection { fn new(position: usize) -> Self { ProofCollection { proof: Default::default(), position } } } - impl Visitor for ProofCollection { + impl Visitor for ProofCollection { fn move_up(&mut self) { self.position /= 2; } - fn visit(&mut self, index: usize, left: &Option, right: &Option) { + fn visit(&mut self, index: usize, left: &Option, right: &Option) { // we are at left branch - right goes to the proof. if self.position == index { if let Some(right) = right { @@ -238,7 +200,7 @@ where collect_proof .proof .iter() - .map(|s| array_bytes::bytes2hex("", s)) + .map(|s| array_bytes::bytes2hex("", s.as_ref())) .collect::>() ); @@ -250,25 +212,19 @@ where /// Can be either a value that needs to be hashed first, /// or the hash itself. #[derive(Debug, PartialEq, Eq)] -pub enum Leaf<'a> { +pub enum Leaf<'a, H> { /// Leaf content. Value(&'a [u8]), /// Hash of the leaf content. - Hash(Hash), + Hash(H), } -impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { +impl<'a, H, T: AsRef<[u8]>> From<&'a T> for Leaf<'a, H> { fn from(v: &'a T) -> Self { Leaf::Value(v.as_ref()) } } -impl<'a> From for Leaf<'a> { - fn from(v: Hash) -> Self { - Leaf::Hash(v) - } -} - /// Verify Merkle Proof correctness versus given root hash. /// /// The proof is NOT expected to contain leaf hash as the first @@ -277,45 +233,47 @@ impl<'a> From for Leaf<'a> { /// /// The proof must not contain the root hash. 
pub fn verify_proof<'a, H, P, L>( - root: &'a Hash, + root: &'a H::Output, proof: P, number_of_leaves: usize, leaf_index: usize, leaf: L, ) -> bool where - H: Hasher, - P: IntoIterator, - L: Into>, + H: HashT, + H::Output: PartialEq + AsRef<[u8]>, + P: IntoIterator, + L: Into>, { if leaf_index >= number_of_leaves { return false } let leaf_hash = match leaf.into() { - Leaf::Value(content) => H::hash(content), + Leaf::Value(content) => ::hash(content), Leaf::Hash(hash) => hash, }; - let mut combined = [0_u8; 64]; + let hash_len = ::LENGTH; + let mut combined = vec![0_u8; hash_len * 2]; let mut position = leaf_index; let mut width = number_of_leaves; let computed = proof.into_iter().fold(leaf_hash, |a, b| { if position % 2 == 1 || position + 1 == width { - combined[0..32].copy_from_slice(&b); - combined[32..64].copy_from_slice(&a); + combined[..hash_len].copy_from_slice(&b.as_ref()); + combined[hash_len..].copy_from_slice(&a.as_ref()); } else { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(&a.as_ref()); + combined[hash_len..].copy_from_slice(&b.as_ref()); } - let hash = H::hash(&combined); + let hash = ::hash(&combined); #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - array_bytes::bytes2hex("", &a), - array_bytes::bytes2hex("", &b), - array_bytes::bytes2hex("", &hash), - array_bytes::bytes2hex("", &combined) + array_bytes::bytes2hex("", &a.as_ref()), + array_bytes::bytes2hex("", &b.as_ref()), + array_bytes::bytes2hex("", &hash.as_ref()), + array_bytes::bytes2hex("", &combined.as_ref()) ); position /= 2; width = ((width - 1) / 2) + 1; @@ -332,20 +290,22 @@ where /// empty iterator) an `Err` with the inner nodes of upper layer is returned. fn merkelize_row( mut iter: I, - mut next: Vec, + mut next: Vec, visitor: &mut V, -) -> Result> +) -> Result> where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: AsRef<[u8]>, + V: Visitor, + I: Iterator, { #[cfg(feature = "debug")] log::debug!("[merkelize_row]"); next.clear(); + let hash_len = ::LENGTH; let mut index = 0; - let mut combined = [0_u8; 64]; + let mut combined = vec![0_u8; hash_len * 2]; loop { let a = iter.next(); let b = iter.next(); @@ -354,17 +314,17 @@ where #[cfg(feature = "debug")] log::debug!( " {:?}\n {:?}", - a.as_ref().map(|s| array_bytes::bytes2hex("", s)), - b.as_ref().map(|s| array_bytes::bytes2hex("", s)) + a.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())), + b.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())) ); index += 2; match (a, b) { (Some(a), Some(b)) => { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(a.as_ref()); + combined[hash_len..].copy_from_slice(b.as_ref()); - next.push(H::hash(&combined)); + next.push(::hash(&combined)); }, // Odd number of items. Promote the item to the upper layer. (Some(a), None) if !next.is_empty() => { @@ -377,7 +337,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(|s| array_bytes::bytes2hex("", s)).collect::>() + next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() ); return Err(next) }, @@ -389,7 +349,6 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where - H: From + Into, BeefyAuthoritySet: sp_api::Decode, { /// Return the currently active BEEFY authority set proof. @@ -403,6 +362,7 @@ sp_api::decl_runtime_apis! 
{ #[cfg(test)] mod tests { use super::*; + use crate::sp_core::H256; #[test] fn should_generate_empty_root() { @@ -411,11 +371,11 @@ mod tests { let data: Vec<[u8; 1]> = Default::default(); // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -429,11 +389,11 @@ mod tests { )]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -448,11 +408,11 @@ mod tests { ]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -461,7 +421,10 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!(array_bytes::bytes2hex("", &merkle_root::(data)), root); + assert_eq!( + array_bytes::bytes2hex("", &merkle_root::(data).as_ref()), + root + ); }; test( @@ -521,18 +484,19 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", &proof0.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof0.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert_eq!( - array_bytes::bytes2hex("", &proof2.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof2.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert!(!verify_proof::( &array_bytes::hex2array_unchecked( "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" - ), + ) + .into(), proof0.proof, data.len(), proof0.leaf_index, @@ -540,7 +504,7 @@ mod tests { )); assert!(!verify_proof::( - &proof0.root, + &proof0.root.into(), vec![], data.len(), proof0.leaf_index, @@ -796,9 +760,10 @@ mod tests { "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; - let root = array_bytes::hex2array_unchecked( + let root: H256 = array_bytes::hex2array_unchecked( "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", - ); + ) + .into(); let data = addresses .into_iter() @@ -808,7 +773,10 @@ mod tests { for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!(array_bytes::bytes2hex("", &proof.root), array_bytes::bytes2hex("", &root)); + assert_eq!( + array_bytes::bytes2hex("", &proof.root.as_ref()), + array_bytes::bytes2hex("", &root.as_ref()) + ); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -831,16 +799,20 @@ mod tests { proof: vec![ array_bytes::hex2array_unchecked( "340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" - ), + ) + .into(), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 456d6e77aa8eb..5b82c89ce84b6 100644 --- a/frame/beefy-mmr/src/lib.rs 
+++ b/frame/beefy-mmr/src/lib.rs @@ -33,7 +33,7 @@ //! //! and thanks to versioning can be easily updated in the future. -use sp_runtime::traits::{Convert, Hash, Member}; +use sp_runtime::traits::{Convert, Member}; use sp_std::prelude::*; use beefy_primitives::{ @@ -142,10 +142,7 @@ pub mod pallet { StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; } -impl LeafDataProvider for Pallet -where - MerkleRootOf: From + Into, -{ +impl LeafDataProvider for Pallet { type LeafData = MmrLeaf< ::BlockNumber, ::Hash, @@ -163,19 +160,9 @@ where } } -impl beefy_merkle_tree::Hasher for Pallet -where - MerkleRootOf: Into, -{ - fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { - ::Hashing::hash(data).into() - } -} - impl beefy_primitives::OnNewValidatorSet<::BeefyId> for Pallet where T: pallet::Config, - MerkleRootOf: From + Into, { /// Compute and cache BEEFY authority sets based on updated BEEFY validator sets. fn on_new_validator_set( @@ -190,10 +177,7 @@ where } } -impl Pallet -where - MerkleRootOf: From + Into, -{ +impl Pallet { /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { Pallet::::beefy_authorities() @@ -220,7 +204,10 @@ where .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; - let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); + let root = beefy_merkle_tree::merkle_root::<::Hashing, _>( + beefy_addresses, + ) + .into(); BeefyAuthoritySet { id, len, root } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 602d0aa5fe1a6..0a64ad3fc9976 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -147,9 +147,10 @@ impl BeefyDataProvider> for DummyDataProvider { fn extra_data() -> Vec { let mut col = vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])]; col.sort(); - beefy_merkle_tree::merkle_root::, _, _>( + beefy_merkle_tree::merkle_root::<::Hashing, _>( col.into_iter().map(|pair| pair.encode()), ) + .as_ref() .to_vec() } } From 005195011f303bf5e2e468dbf379dd42971bf6ae Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 4 Oct 2022 14:34:54 +0300 Subject: [PATCH 35/75] client/beefy: small code improvements (#12414) * client/beefy: remove bounds on type definitions * client/beefy: remove gossip protocol legacy name * client/beefy: simplify justification request response engine Signed-off-by: Adrian Catangiu --- client/beefy/src/communication/mod.rs | 5 --- .../outgoing_requests_engine.rs | 34 ++++++++----------- client/beefy/src/lib.rs | 16 ++------- 3 files changed, 17 insertions(+), 38 deletions(-) diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs index 93646677c0ecd..91798d4ae0d33 100644 --- a/client/beefy/src/communication/mod.rs +++ b/client/beefy/src/communication/mod.rs @@ -33,9 +33,6 @@ pub(crate) mod beefy_protocol_name { /// BEEFY justifications protocol name suffix. const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; - /// Old names for the gossip protocol, used for backward compatibility. - pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - /// Name of the votes gossip protocol used by BEEFY. /// /// Must be registered towards the networking in order for BEEFY voter to properly function. 
@@ -73,9 +70,7 @@ pub fn beefy_peers_set_config( ) -> sc_network_common::config::NonDefaultSetConfig { let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); cfg } diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index e22958e19cd2e..c4d3c926190e6 100644 --- a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -20,10 +20,7 @@ use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; use codec::Encode; -use futures::{ - channel::{oneshot, oneshot::Canceled}, - stream::{self, StreamExt}, -}; +use futures::channel::{oneshot, oneshot::Canceled}; use log::{debug, error, warn}; use parking_lot::Mutex; use sc_network::{PeerId, ProtocolName}; @@ -50,8 +47,8 @@ type Response = Result, RequestFailure>; type ResponseReceiver = oneshot::Receiver; enum State { - Idle(stream::Pending>), - AwaitingResponse(PeerId, NumberFor, stream::Once), + Idle, + AwaitingResponse(PeerId, NumberFor, ResponseReceiver), } pub struct OnDemandJustificationsEngine { @@ -83,7 +80,7 @@ where protocol_name, live_peers, peers_cache: VecDeque::new(), - state: State::Idle(stream::pending()), + state: State::Idle, } } @@ -118,15 +115,14 @@ where IfDisconnected::ImmediateError, ); - self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + self.state = State::AwaitingResponse(peer, block, rx); } /// If no other request is in progress, start new justification request for `block`. pub fn request(&mut self, block: NumberFor) { // ignore new requests while there's already one pending - match &self.state { - State::AwaitingResponse(_, _, _) => return, - State::Idle(_) => (), + if matches!(self.state, State::AwaitingResponse(_, _, _)) { + return } self.reset_peers_cache_for_block(block); @@ -148,7 +144,7 @@ where "🥩 cancel pending request for justification #{:?}", number ); - self.state = State::Idle(stream::pending()); + self.state = State::Idle; }, _ => (), } @@ -194,19 +190,19 @@ where pub async fn next(&mut self) -> Option> { let (peer, block, resp) = match &mut self.state { - State::Idle(pending) => { - let _ = pending.next().await; - // This never happens since 'stream::pending' never generates any items. + State::Idle => { + futures::pending!(); + // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. return None }, State::AwaitingResponse(peer, block, receiver) => { - let resp = receiver.next().await?; + let resp = receiver.await; (*peer, *block, resp) }, }; - // We received the awaited response. Our 'stream::once()' receiver will never generate any - // other response, meaning we're done with current state. Move the engine to `State::Idle`. - self.state = State::Idle(stream::pending()); + // We received the awaited response. Our 'receiver' will never generate any other response, + // meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle; let block_id = BlockId::number(block); let validator_set = self diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 7407f101e99a5..760fc753b18a3 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -153,11 +153,7 @@ where } /// BEEFY gadget network parameters. 
-pub struct BeefyNetworkParams -where - B: Block, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyNetworkParams { /// Network implementing gossip, requests and sync-oracle. pub network: Arc, /// Chain specific BEEFY gossip protocol name. See @@ -171,15 +167,7 @@ where } /// BEEFY gadget initialization parameters. -pub struct BeefyParams -where - B: Block, - BE: Backend, - C: Client, - R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend From 07e5ec5eb8a9c470bf7dc9afbdb3ac0579be4f4b Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 4 Oct 2022 14:26:14 +0200 Subject: [PATCH 36/75] [Fix] Rename VoterBagsList -> VoterList to match pdot (#12416) --- bin/node/runtime/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f0c68b5b225cd..c2d29731ea2e6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -569,7 +569,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::UnboundedExecution; - type VoterList = VoterBagsList; + type VoterList = VoterList; // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; @@ -1651,7 +1651,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, TransactionStorage: pallet_transaction_storage, - VoterBagsList: pallet_bags_list::, + VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, @@ -1739,7 +1739,7 @@ mod benches { [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] - [pallet_bags_list, VoterBagsList] + [pallet_bags_list, VoterList] [pallet_balances, Balances] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] From d11dd02dda6a00800a13cd59a0c2f07ac75e082d Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Tue, 4 Oct 2022 15:15:57 +0200 Subject: [PATCH 37/75] Use saturating add for alliance::disband witness data (#12418) --- frame/alliance/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 24111b44ced9e..fca17e69c7652 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -706,7 +706,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::disband( witness.voting_members, witness.ally_members, - witness.voting_members + witness.ally_members, + witness.voting_members.saturating_add(witness.ally_members), ))] pub fn disband( origin: OriginFor, From 91d072df4273bbc04f8152099b23a66a0c1d531b Mon Sep 17 00:00:00 2001 From: Chevdor Date: Tue, 4 Oct 2022 21:30:45 +0200 Subject: [PATCH 38/75] Bump prost to 0.11+ (#12419) --- Cargo.lock | 16 ++++++++-------- client/authority-discovery/Cargo.toml | 4 ++-- client/network/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/light/Cargo.toml | 4 ++-- client/network/sync/Cargo.toml | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f0a2df0f101b..a35dbba7d089e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7830,8 +7830,8 @@ 
dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8486,7 +8486,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost 0.10.3", + "prost 0.11.0", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8553,7 +8553,7 @@ dependencies = [ "libp2p", "linked_hash_set", "parity-scale-codec", - "prost-build 0.10.4", + "prost-build 0.11.1", "sc-consensus", "sc-peerset", "serde", @@ -8595,8 +8595,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8617,8 +8617,8 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "sc-block-builder", "sc-client-api", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index d9e9df4f2a97c..37377cdc6dde3 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -23,7 +23,7 @@ futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" -prost = "0.10" +prost = "0.11" rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index e96749df40aa2..8e3d68851c423 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -33,7 +33,7 @@ log = "0.4.17" lru = "0.7.5" parking_lot = "0.12.1" pin-project = "1.0.10" -prost = "0.10" +prost = "0.11" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 1ee7b15538366..0e9801ec79e63 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] async-trait = "0.1.57" diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index c2a77c3b577ba..a1a5dcf85eb5d 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.16" -prost = "0.10" +prost = "0.11" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 269214aeff3f7..24d418f7233d7 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" 
targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -25,7 +25,7 @@ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" -prost = "0.10" +prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } From 241b0d0455453499763d0db0b4ea4188012b372f Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 5 Oct 2022 00:16:07 +0200 Subject: [PATCH 39/75] Improved election pallet testing (#12327) * Improved election pallet testing * fmt * remove comment * more checks * fixes in logic * roll_to_signed * switch to roll_to_signed * Update frame/election-provider-multi-phase/src/mock.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove useless checks * remove warning * add checks to signed.rs * add some checks to unsigned.rs * fmt * use roll_to_signed and roll_to_unsigned * remove nonsense * remove even more nonsense * fix * fix * remove useless checks Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> --- .../election-provider-multi-phase/src/lib.rs | 161 +++++++++++++++--- .../election-provider-multi-phase/src/mock.rs | 11 ++ .../src/signed.rs | 130 +++++++++++--- .../src/unsigned.rs | 75 +++++--- 4 files changed, 305 insertions(+), 72 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index fb17bd25ea541..3dc6161bb202a 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1842,9 +1842,9 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, raw_solution, roll_to, AccountId, ExtBuilder, MockWeightInfo, - MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxSubmissions, System, - TargetIndex, Targets, + multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, + ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, + SignedMaxSubmissions, System, TargetIndex, Targets, }, Phase, }; @@ -1868,7 +1868,7 @@ mod tests { assert!(MultiPhase::snapshot().is_none()); assert_eq!(MultiPhase::round(), 1); - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert!(MultiPhase::snapshot().is_some()); @@ -1879,7 +1879,7 @@ mod tests { assert!(MultiPhase::snapshot().is_some()); assert_eq!(MultiPhase::round(), 1); - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert_eq!( multi_phase_events(), @@ -1912,11 +1912,29 @@ mod tests { roll_to(44); assert!(MultiPhase::current_phase().is_off()); - roll_to(45); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); roll_to(55); assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + }, + Event::SignedPhaseStarted { round: 2 }, + Event::UnsignedPhaseStarted { round: 2 } + ] + ); }) } @@ -1940,6 +1958,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); 
assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); } @@ -1952,7 +1985,7 @@ mod tests { roll_to(19); assert!(MultiPhase::current_phase().is_off()); - roll_to(20); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert!(MultiPhase::snapshot().is_some()); @@ -1963,6 +1996,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ) }); } @@ -1985,6 +2033,14 @@ mod tests { assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -1993,16 +2049,13 @@ mod tests { // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // Signed phase started at block 15 and will end at 25. - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); // An unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // We surely can't have any feasible solutions. This will cause an on-chain election. @@ -2031,10 +2084,8 @@ mod tests { // an early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // signed phase started at block 15 and will end at 25. - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); @@ -2052,7 +2103,6 @@ mod tests { } // an unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // all storage items must be cleared. 
@@ -2062,16 +2112,38 @@ mod tests { assert!(MultiPhase::desired_targets().is_none()); assert!(MultiPhase::queued_solution().is_none()); assert!(MultiPhase::signed_submissions().is_empty()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } #[test] fn check_events_with_compute_signed() { ExtBuilder::default().build_and_execute(|| { - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -2106,7 +2178,7 @@ mod tests { #[test] fn check_events_with_compute_unsigned() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. @@ -2125,7 +2197,6 @@ mod tests { )); assert!(MultiPhase::queued_solution().is_some()); - roll_to(30); assert_ok!(MultiPhase::elect()); assert_eq!( @@ -2153,7 +2224,7 @@ mod tests { #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far, but we get a result. @@ -2166,11 +2237,27 @@ mod tests { (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) ] - ) + ); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2178,13 +2265,22 @@ mod tests { assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFailed + ] + ); }) } #[test] fn governance_fallback_works() { ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2243,9 +2339,16 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); // On-chain backup works though. 
- roll_to(29); let supports = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -2269,6 +2372,8 @@ mod tests { let err = MultiPhase::elect().unwrap_err(); assert_eq!(err, ElectionError::Fallback("NoFallback.")); assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!(multi_phase_events(), vec![Event::ElectionFailed]); }); } @@ -2282,7 +2387,7 @@ mod tests { crate::mock::MaxElectingVoters::set(2); // Signed phase opens just fine. - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!( @@ -2295,7 +2400,7 @@ mod tests { #[test] fn untrusted_score_verification_is_respected() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); // set the solution balancing to get the desired score. diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index d3082be0cf750..2615d863c91e0 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -99,6 +99,17 @@ pub fn roll_to(n: BlockNumber) { } } +pub fn roll_to_unsigned() { + while !matches!(MultiPhase::current_phase(), Phase::Unsigned(_)) { + roll_to(System::block_number() + 1); + } +} +pub fn roll_to_signed() { + while !matches!(MultiPhase::current_phase(), Phase::Signed) { + roll_to(System::block_number() + 1); + } +} + pub fn roll_to_with_ocw(n: BlockNumber) { let now = System::block_number(); for i in now + 1..=n { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 2e01d99be0a42..175c92757f35e 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -528,10 +528,11 @@ mod tests { use super::*; use crate::{ mock::{ - balances, raw_solution, roll_to, Balances, ExtBuilder, MockedWeightInfo, MultiPhase, - Runtime, RuntimeOrigin, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, + balances, multi_phase_events, raw_solution, roll_to, roll_to_signed, Balances, + ExtBuilder, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxRefunds, + SignedMaxSubmissions, SignedMaxWeight, }, - Error, Perbill, Phase, + Error, Event, Perbill, Phase, }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; @@ -555,7 +556,7 @@ mod tests { #[test] fn should_pay_deposit() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -565,13 +566,21 @@ mod tests { assert_eq!(balances(&99), (95, 5)); assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false } + ] + ); }) } #[test] fn good_solution_is_rewarded() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -582,13 +591,22 @@ mod tests { assert!(MultiPhase::finalize_signed_phase()); assert_eq!(balances(&99), (100 + 7 + 8, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + 
Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn bad_solution_is_slashed() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -604,13 +622,22 @@ mod tests { assert!(!MultiPhase::finalize_signed_phase()); // and the bond is gone. assert_eq!(balances(&99), (95, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 } + ] + ); }) } #[test] fn suppressed_solution_gets_bond_back() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -633,13 +660,22 @@ mod tests { assert_eq!(balances(&99), (100 + 7 + 8, 0)); // 999 gets everything back, including the call fee. assert_eq!(balances(&999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn cannot_submit_worse_with_full_queue() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -667,7 +703,7 @@ mod tests { #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(SignedMaxRefunds::get(), 1); assert!(SignedMaxSubmissions::get() > 2); @@ -683,7 +719,7 @@ mod tests { assert_eq!(balances(&account), (95, 5)); } - assert!(MultiPhase::finalize_signed_phase()); + assert_ok!(MultiPhase::do_elect()); for s in 0..SignedMaxSubmissions::get() { let account = 99 + s as u64; @@ -699,6 +735,26 @@ mod tests { assert_eq!(balances(&account), (100, 0)); } } + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 }, + Event::ElectionFinalized { + compute: ElectionCompute::Signed, + score: ElectionScore { + minimal_stake: 40, + sum_stake: 100, + sum_stake_squared: 5200 + } + } + ] + ); }); } @@ -708,7 +764,7 @@ mod tests { .signed_max_submission(1) .better_signed_threshold(Perbill::from_percent(20)) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = RawSolution { @@ -747,13 +803,27 @@ mod tests { }; assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + 
prev_ejected: false + }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + prev_ejected: true + } + ] + ); }) } #[test] fn weakest_is_removed_if_better_provided() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -800,7 +870,7 @@ mod tests { #[test] fn replace_weakest_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 1..SignedMaxSubmissions::get() { @@ -847,7 +917,7 @@ mod tests { #[test] fn early_ejected_solution_gets_bond_back() { ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -878,7 +948,7 @@ mod tests { #[test] fn equally_good_solution_is_not_accepted() { ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for i in 0..SignedMaxSubmissions::get() { @@ -915,7 +985,7 @@ mod tests { // - bad_solution_is_slashed // - suppressed_solution_gets_bond_back ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(balances(&99), (100, 0)); @@ -951,6 +1021,17 @@ mod tests { assert_eq!(balances(&999), (95, 0)); // 9999 gets everything back, including the call fee. assert_eq!(balances(&9999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 999, value: 5 }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } @@ -960,7 +1041,7 @@ mod tests { .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -994,7 +1075,7 @@ mod tests { #[test] fn insufficient_deposit_does_not_store_submission() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1014,7 +1095,7 @@ mod tests { #[test] fn insufficient_deposit_with_full_queue_works_properly() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -1060,7 +1141,7 @@ mod tests { #[test] fn finalize_signed_phase_is_idempotent_given_submissions() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1073,6 +1154,15 @@ mod tests { // calling it again doesn't change anything assert_storage_noop!(MultiPhase::finalize_signed_phase()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } } diff --git 
a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 025ff832bb08a..7340605dfe621 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -1050,11 +1050,12 @@ mod tests { use super::*; use crate::{ mock::{ - roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, - TestNposSolution, TrimHelpers, UnsignedPhase, + multi_phase_events, roll_to, roll_to_signed, roll_to_unsigned, roll_to_with_ocw, + trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, + Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, + UnsignedPhase, }, - CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + CurrentPhase, Event, InvalidTransaction, Phase, QueuedSolution, TransactionSource, TransactionValidityError, }; use codec::Decode; @@ -1100,7 +1101,7 @@ mod tests { )); // signed - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert!(matches!( ::validate_unsigned( @@ -1116,7 +1117,7 @@ mod tests { )); // unsigned - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert!(::validate_unsigned( @@ -1147,7 +1148,7 @@ mod tests { #[test] fn validate_unsigned_retracts_low_score() { ExtBuilder::default().desired_targets(0).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1193,7 +1194,7 @@ mod tests { #[test] fn validate_unsigned_retracts_incorrect_winner_count() { ExtBuilder::default().desired_targets(1).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let raw = RawSolution:: { @@ -1222,7 +1223,7 @@ mod tests { .miner_tx_priority(20) .desired_targets(0) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1253,7 +1254,7 @@ mod tests { Some(\"PreDispatchWrongWinnerCount\") })")] fn unfeasible_solution_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This is in itself an invalid BS solution. @@ -1275,7 +1276,7 @@ mod tests { deprive validator from their authoring reward.")] fn wrong_witness_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This solution is unfeasible as well, but we won't even get there. @@ -1299,7 +1300,7 @@ mod tests { #[test] fn miner_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. 
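The same substitution runs through the rest of this file: hard-coded `roll_to(15)` / `roll_to(25)` calls become `roll_to_signed()` / `roll_to_unsigned()`, the mock helpers added earlier in this patch that advance one block at a time until the phase predicate holds. A sketch of that pattern in a generalized form (the `roll_until` name is hypothetical; only the two concrete helpers exist in the mock):

```rust
// Hypothetical generalization of `roll_to_signed`/`roll_to_unsigned`: advance
// block by block until an arbitrary phase predicate holds. `roll_to`, `System`,
// `MultiPhase`, and `Phase` are the mock items shown in the diff above.
fn roll_until(pred: impl Fn() -> bool) {
	while !pred() {
		roll_to(System::block_number() + 1);
	}
}

// Equivalent to `roll_to_unsigned()`:
// roll_until(|| matches!(MultiPhase::current_phase(), Phase::Unsigned(_)));
```

Looping on the phase rather than on a block number keeps these tests passing even if the mock's phase offsets change.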
@@ -1317,6 +1318,17 @@ mod tests { witness )); assert!(MultiPhase::queued_solution().is_some()); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Unsigned, + prev_ejected: false + } + ] + ); }) } @@ -1326,7 +1338,7 @@ mod tests { .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -1360,7 +1372,7 @@ mod tests { fn miner_will_not_submit_if_not_enough_winners() { let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail @@ -1386,7 +1398,7 @@ mod tests { .add_voter(8, 5, bounded_vec![10]) .better_unsigned_threshold(Perbill::from_percent(50)) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert_eq!(MultiPhase::desired_targets().unwrap(), 1); @@ -1488,7 +1500,7 @@ mod tests { ext.execute_with(|| { let offchain_repeat = ::OffchainRepeat::get(); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // first execution -- okay. @@ -1529,7 +1541,7 @@ mod tests { let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // initially, the lock is not set. @@ -1550,7 +1562,7 @@ mod tests { // ensure that if the guard is in hold, a new execution is not allowed. let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // artificially set the value, as if another thread is mid-way. @@ -1578,7 +1590,7 @@ mod tests { fn ocw_only_runs_when_unsigned_open_now() { let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // we must clear the offchain storage to ensure the offchain execution check doesn't get @@ -1658,6 +1670,21 @@ mod tests { // the submitted solution changes because the cache was cleared. assert_eq!(tx_cache_1, tx_cache_3); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } @@ -1797,7 +1824,7 @@ mod tests { #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1822,7 +1849,7 @@ mod tests { #[test] fn trim_assignments_length_modifies_when_too_long() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. 
} = trim_helpers(); @@ -1848,7 +1875,7 @@ mod tests { #[test] fn trim_assignments_length_trims_lowest_stake() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = @@ -1911,7 +1938,7 @@ mod tests { // or when we trim it to zero. ExtBuilder::default().build_and_execute(|| { // we need snapshot for `trim_helpers` to work. - roll_to(25); + roll_to_unsigned(); let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); assert!(assignments.len() > 0); @@ -1933,7 +1960,7 @@ mod tests { #[test] fn mine_solution_solutions_always_within_acceptable_length() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // how long would the default solution be? let solution = MultiPhase::mine_solution().unwrap(); From 7a8de4995715cc6cd11a79eb262bf41e5b190943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 5 Oct 2022 13:10:20 +0200 Subject: [PATCH 40/75] Adapt `pallet-contracts` to WeightV2 (#12421) * Replace contract access weight by proper PoV component * Return the whole weight struct from dry-runs * Fixup `seal_call` and `seal_instantiate` * Fix duplicate extrinsics * Remove ContractAccessWeight from runtime * Fix doc link * Remove leftover debugging output --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 1 - frame/contracts/primitives/Cargo.toml | 1 + frame/contracts/primitives/src/lib.rs | 11 +- frame/contracts/src/gas.rs | 61 +++++---- frame/contracts/src/lib.rs | 181 +++++-------------------- frame/contracts/src/tests.rs | 36 ++--- frame/contracts/src/wasm/code_cache.rs | 15 +- 8 files changed, 96 insertions(+), 211 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a35dbba7d089e..309742e5bf17e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5531,6 +5531,7 @@ dependencies = [ "parity-scale-codec", "sp-runtime", "sp-std", + "sp-weights", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c2d29731ea2e6..4898312f9608f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1175,7 +1175,6 @@ impl pallet_contracts::Config for Runtime { type DeletionWeightLimit = DeletionWeightLimit; type Schedule = Schedule; type AddressGenerator = pallet_contracts::DefaultAddressGenerator; - type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/primitives/Cargo.toml index 64e332007350b..c8b7c4a2f7c37 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -19,6 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = # Substrate Dependencies (This crate should not rely on frame) sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../../primitives/weights" } [features] default = ["std"] diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index 5daf875ac2651..4faea9eb3ee75 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -26,17 +26,18 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; use sp_std::prelude::*; +use 
sp_weights::Weight; /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct ContractResult { - /// How much gas was consumed during execution. - pub gas_consumed: u64, - /// How much gas is required as gas limit in order to execute this call. + /// How much weight was consumed during execution. + pub gas_consumed: Weight, + /// How much weight is required as gas limit in order to execute this call. /// - /// This value should be used to determine the gas limit for on-chain execution. + /// This value should be used to determine the weight limit for on-chain execution. /// /// # Note /// @@ -44,7 +45,7 @@ pub struct ContractResult { /// is used. Currently, only `seal_call_runtime` makes use of pre charging. /// Additionally, any `seal_call` or `seal_instantiate` makes use of pre-charging /// when a non-zero `gas_limit` argument is supplied. - pub gas_required: u64, + pub gas_required: Weight, /// How much balance was deposited and reserved during execution in order to pay for storage. /// /// The storage deposit is never actually charged from the caller in case of [`Self::result`] diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 215b4d42daa06..d0076652dd6d4 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,32 +107,45 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == Weight::zero() { self.gas_left } else { amount }; - // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - if self.gas_left.any_lt(amount) { - Err(>::OutOfGas.into()) - } else { - self.gas_left -= amount; - Ok(GasMeter::new(amount)) - } + let amount = Weight::from_components( + if amount.ref_time().is_zero() { + self.gas_left().ref_time() + } else { + amount.ref_time() + }, + if amount.proof_size().is_zero() { + self.gas_left().proof_size() + } else { + amount.proof_size() + }, + ); + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| >::OutOfGas)?; + Ok(GasMeter::new(amount)) } /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left == Weight::zero() { + if self.gas_left.ref_time().is_zero() { // All of the remaining gas was inherited by the nested gas meter. When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. // We cannot call `self.gas_left_lowest()` here because in the state that this // code is run the parent gas meter has `0` gas left. - self.gas_left_lowest = nested.gas_left_lowest().min(self.gas_left_lowest); + *self.gas_left_lowest.ref_time_mut() = + nested.gas_left_lowest().ref_time().min(self.gas_left_lowest.ref_time()); } else { // The nested gas meter was created with a fixed amount that did not consume all of the // parents (self) gas. The lowest gas that self will experience is when the nested // gas was pre charged with the fixed amount. 
- self.gas_left_lowest = self.gas_left_lowest(); + *self.gas_left_lowest.ref_time_mut() = self.gas_left_lowest().ref_time(); + } + if self.gas_left.proof_size().is_zero() { + *self.gas_left_lowest.proof_size_mut() = + nested.gas_left_lowest().proof_size().min(self.gas_left_lowest.proof_size()); + } else { + *self.gas_left_lowest.proof_size_mut() = self.gas_left_lowest().proof_size(); } self.gas_left += nested.gas_left; } @@ -155,17 +168,11 @@ where ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } - let amount = token.weight(); - let new_value = self.gas_left.checked_sub(&amount); - - // We always consume the gas even if there is not enough gas. - self.gas_left = new_value.unwrap_or_else(Zero::zero); - - match new_value { - Some(_) => Ok(ChargedAmount(amount)), - None => Err(Error::<T>::OutOfGas.into()), - } + // It is OK to not charge anything on failure because we always charge _before_ we perform + // any action. + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| Error::<T>::OutOfGas)?; + Ok(ChargedAmount(amount)) } /// Adjust a previously charged amount down to its actual amount. @@ -298,20 +305,16 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that if the gas meter is charged by exceeding amount then not only an error - // returned for that charge, but also for all consequent charges. - // - // This is not strictly necessary, because the execution should be interrupted immediately - // if the gas meter runs out of gas. However, this is just a nice property to have. + // Make sure that the gas meter does not charge in case of an overcharge. #[test] - fn overcharge_is_unrecoverable() { + fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(200)); // The first charge should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); - // The gas meter is emptied at this moment, so this should also fail. - assert!(gas_meter.charge(SimpleToken(1)).is_err()); + // The gas meter should still contain the full 200. + assert!(gas_meter.charge(SimpleToken(200)).is_ok()); } // Charging the exact amount that the user paid for should be diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 3aeb8742705c2..0c90c3ff433b4 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -107,7 +107,7 @@ use crate::{ }; use codec::{Encode, HasCompact}; use frame_support::{ - dispatch::{DispatchClass, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, + dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, traits::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, @@ -116,7 +116,7 @@ use frame_support::{ weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; -use frame_system::{limits::BlockWeights, Pallet as System}; +use frame_system::Pallet as System; use pallet_contracts_primitives::{ Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue, @@ -199,29 +199,6 @@ where } } -/// A conservative implementation to be used for [`pallet::Config::ContractAccessWeight`]. -/// -/// This derives the weight from the [`BlockWeights`] passed as `B` and the `maxPovSize` passed -/// as `P`. The default value for `P` is the `maxPovSize` used by Polkadot and Kusama.
-/// -/// It simply charges from the weight meter pro rata: If loading the contract code would consume -/// 50% of the max storage proof then this charges 50% of the max block weight. -pub struct DefaultContractAccessWeight<B: Get<BlockWeights>, const P: u32 = 5_242_880>( - PhantomData<B>, -); - -impl<B: Get<BlockWeights>, const P: u32> Get<Weight> for DefaultContractAccessWeight<B, P> { - fn get() -> Weight { - let block_weights = B::get(); - block_weights - .per_class - .get(DispatchClass::Normal) - .max_total - .unwrap_or(block_weights.max_block) / - u64::from(P) - } -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -334,27 +311,6 @@ pub mod pallet { #[pallet::constant] type DepositPerByte: Get<BalanceOf<Self>>; - /// The weight per byte of code that is charged when loading a contract from storage. - /// - /// Currently, FRAME only charges fees for computation incurred but not for PoV - /// consumption caused for storage access. This is usually not exploitable because - /// accessing storage carries some substantial weight costs, too. However in case - /// of contract code very much PoV consumption can be caused while consuming very little - /// computation. This could be used to keep the chain busy without paying the - /// proper fee for it. Until this is resolved we charge from the weight meter for - /// contract access. - /// - /// For more information check out: - /// - /// [`DefaultContractAccessWeight`] is a safe default to be used for Polkadot or Kusama - /// parachains. - /// - /// # Note - /// - /// This is only relevant for parachains. Set to zero in case of a standalone chain. - #[pallet::constant] - type ContractAccessWeight: Get<Weight>; - /// The amount of balance a caller has to pay for each storage item. /// /// # Note @@ -413,23 +369,8 @@ pub mod pallet { T::AccountId: AsRef<[u8]>, <BalanceOf<T> as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { - /// Makes a call to an account, optionally transferring some balance. - /// - /// # Parameters - /// - /// * `dest`: Address of the contract to call. - /// * `value`: The balance to transfer from the `origin` to `dest`. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the - /// caller to pay for the storage consumed. - /// * `data`: The input data to pass to the contract. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + /// Deprecated version of [`Self::call`] for use in an in-storage `Call`.
+ #[pallet::weight(T::WeightInfo::call().saturating_add(<Pallet<T>>::compat_weight(*gas_limit)))] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] pub fn call_old_weight( @@ -440,55 +381,20 @@ pub mod pallet { storage_deposit_limit: Option<<BalanceOf<T> as codec::HasCompact>::Type>, data: Vec<u8>, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - let mut output = Self::internal_call( + Self::call( origin, dest, value, - gas_limit, - storage_deposit_limit.map(Into::into), + <Pallet<T>>::compat_weight(gas_limit), + storage_deposit_limit, data, - None, - ); - if let Ok(retval) = &output.result { - if retval.did_revert() { - output.result = Err(<Error<T>>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + ) } - /// Instantiates a new contract from the supplied `code` optionally transferring - /// some balance. - /// - /// This dispatchable has the same effect as calling [`Self::upload_code`] + - /// [`Self::instantiate`]. Bundling them together provides efficiency gains. Please - /// also check the documentation of [`Self::upload_code`]. - /// - /// # Parameters - /// - /// * `value`: The balance to transfer from the `origin` to the newly created contract. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved - /// from the caller to pay for the storage consumed. - /// * `code`: The contract code to deploy in raw bytes. - /// * `data`: The input data to pass to the contract constructor. - /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. - /// - /// Instantiation is executed as follows: - /// - /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that - /// code. - /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. - /// - The destination address is computed based on the sender, code_hash and the salt. - /// - The smart-contract account is created at the computed address. - /// - The `value` is transferred to the new account. - /// - The `deploy` function is executed in the context of the newly-created account. + /// Deprecated version of [`Self::instantiate_with_code`] for use in an in-storage `Call`. #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add((*gas_limit).into()) + .saturating_add(<Pallet<T>>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated( @@ -503,38 +409,20 @@ pub mod pallet { data: Vec<u8>, salt: Vec<u8>, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let code_len = code.len() as u32; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate_with_code( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Upload(code), + <Pallet<T>>::compat_weight(gas_limit), + storage_deposit_limit, + code, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(<Error<T>>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, result)| result), - T::WeightInfo::instantiate_with_code(code_len, salt_len), ) } - /// Instantiates a contract from a previously deployed wasm binary.
- /// - /// This function is identical to [`Self::instantiate_with_code`] but without the - /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary - /// must be supplied. + /// Deprecated version of [`Self::instantiate`] for use in an in-storage `Call`. #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(<Pallet<T>>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] @@ -547,27 +435,14 @@ pub mod pallet { data: Vec<u8>, salt: Vec<u8>, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Existing(code_hash), + <Pallet<T>>::compat_weight(gas_limit), + storage_deposit_limit, + code_hash, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(<Error<T>>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, output)| output), - T::WeightInfo::instantiate(salt_len), ) } @@ -1059,8 +934,8 @@ where ); ContractExecResult { result: output.result.map_err(|r| r.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1104,8 +979,8 @@ where .result .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) .map_err(|e| e.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1287,4 +1162,12 @@ where fn min_balance() -> BalanceOf<T> { <T::Currency as Inspect<AccountIdOf<T>>>::minimum_balance() } + + /// Convert a 1D Weight to a 2D weight. + /// + /// Used by backwards compatible extrinsics. We cannot just set the proof to zero + /// or an old `Call` will just fail.
+ fn compat_weight(gas_limit: OldWeight) -> Weight { + Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2) + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index b4a8f8f4c834f..6a2144840143a 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -26,8 +26,8 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, - DefaultContractAccessWeight, DeletionQueue, Error, Pallet, Schedule, + BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, DeletionQueue, + Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; @@ -404,7 +404,6 @@ impl Config for Test { type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; type AddressGenerator = DefaultAddressGenerator; - type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } @@ -414,7 +413,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(256 * 1024); pub struct ExtBuilder { existential_deposit: u64, @@ -674,7 +673,7 @@ fn run_out_of_gas() { RuntimeOrigin::signed(ALICE), addr, // newly created account 0, - Weight::from_ref_time(1_000_000_000_000), + Weight::from_ref_time(1_000_000_000_000).set_proof_size(u64::MAX), None, vec![], ), @@ -1760,7 +1759,7 @@ fn chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 42); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1771,7 +1770,7 @@ fn chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 95); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( @@ -2409,10 +2408,11 @@ fn reinstrument_does_charge() { let result2 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result2.result.unwrap().did_revert()); - assert!(result2.gas_consumed > result1.gas_consumed); + assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( - result2.gas_consumed, - result1.gas_consumed + ::WeightInfo::reinstrument(code_len).ref_time(), + result2.gas_consumed.ref_time(), + result1.gas_consumed.ref_time() + + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } @@ -2536,7 +2536,7 @@ fn gas_estimation_nested_call_fixed_limit() { assert_ok!(&result.result); // We have a subcall with a fixed gas limit. This constitutes precharging. - assert!(result.gas_required > result.gas_consumed); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the estimated gas. Should succeed. 
assert_ok!( @@ -2544,7 +2544,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2557,6 +2557,7 @@ fn gas_estimation_nested_call_fixed_limit() { #[test] #[cfg(feature = "unstable-interface")] fn gas_estimation_call_runtime() { + use codec::Decode; let (caller_code, caller_hash) = compile_module::("call_runtime").unwrap(); let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -2591,7 +2592,7 @@ fn gas_estimation_call_runtime() { let call = RuntimeCall::Contracts(crate::Call::call { dest: addr_callee, value: 0, - gas_limit: GAS_LIMIT / 3, + gas_limit: GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() / 3), storage_deposit_limit: None, data: vec![], }); @@ -2604,9 +2605,10 @@ fn gas_estimation_call_runtime() { call.encode(), false, ); - assert_ok!(&result.result); - - assert!(result.gas_required > result.gas_consumed); + // contract encodes the result of the dispatch runtime + let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); + assert_eq!(outcome, 0); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the required gas. Should succeed. assert_ok!( @@ -2614,7 +2616,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, None, call.encode(), false, diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 137eccf3db686..09e51d981360b 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -228,16 +228,11 @@ impl Token for CodeToken { // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. 
- let ref_time_weight = match *self { + match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), - Load(len) => { - let computation = T::WeightInfo::call_with_code_per_byte(len) - .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwidth = T::ContractAccessWeight::get().saturating_mul(len as u64); - computation.max(bandwidth) - }, - }; - - ref_time_weight + Load(len) => T::WeightInfo::call_with_code_per_byte(len) + .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) + .set_proof_size(len.into()), + } } } From 2cd40882d27ef10437c691b2705e67fa97f7c074 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Wed, 5 Oct 2022 17:11:50 +0300 Subject: [PATCH 41/75] MMR: impl `TypeInfo` for some structures (#12423) * BEEFY client: avoid unnecessary clone * MMR: impl TypeInfo for some structures --- Cargo.lock | 1 + client/beefy/rpc/src/lib.rs | 2 +- client/beefy/src/worker.rs | 2 +- primitives/beefy/src/mmr.rs | 4 ++-- primitives/merkle-mountain-range/Cargo.toml | 1 + primitives/merkle-mountain-range/src/lib.rs | 5 +++-- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 309742e5bf17e..647511373cc31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9999,6 +9999,7 @@ dependencies = [ "array-bytes", "log", "parity-scale-codec", + "scale-info", "serde", "sp-api", "sp-core", diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 0af474116e6d0..6f21abc616db8 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -84,7 +84,7 @@ impl From for JsonRpseeError { // Provides RPC methods for interacting with BEEFY. #[rpc(client, server)] pub trait BeefyApi { - /// Returns the block most recently finalized by BEEFY, alongside side its justification. + /// Returns the block most recently finalized by BEEFY, alongside its justification. #[subscription( name = "beefy_subscribeJustifications" => "beefy_justifications", unsubscribe = "beefy_unsubscribeJustifications", diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 832b43315515f..5bdc72357c412 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -508,7 +508,7 @@ where if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), - (BEEFY_ENGINE_ID, finality_proof.clone().encode()), + (BEEFY_ENGINE_ID, finality_proof.encode()), ) { error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 761eee9f8ef85..471cb96841b8e 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -44,7 +44,7 @@ impl BeefyDataProvider> for () { } /// A standard leaf that gets added every block to the MMR constructed by Substrate's `pallet_mmr`. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeaf { /// Version of the leaf format. /// @@ -73,7 +73,7 @@ pub struct MmrLeaf { /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped /// very rarely (hopefuly never). -#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { /// Create new version object from `major` and `minor` components. 
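The `TypeInfo` derives added to `MmrLeaf` and `MmrLeafVersion` above are what allow these structures to be described in the runtime metadata: `scale-info` can only generate type information for a type if the type and all of its field types implement the trait. A minimal, self-contained sketch of the same derive set on stand-in types (`Leaf` and `LeafVersion` are illustrative names, not the real `MmrLeaf`/`MmrLeafVersion`):

```rust
// Stand-in types showing the derive set used in the diff above. Requires the
// `derive` features of `parity-scale-codec` and `scale-info`.
use codec::{Decode, Encode};
use scale_info::TypeInfo;

#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
pub struct LeafVersion(u8);

// The derive is transitive: `Leaf<H>` only implements `TypeInfo` where `H`
// does, which is why the version type above needs the derive as well.
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
pub struct Leaf<H> {
	pub version: LeafVersion,
	pub parent_hash: H,
}
```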
diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index e7e203942e845..0be53132f3eec 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index c40a594739ec1..7a26cae839ea9 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] +use scale_info::TypeInfo; use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; #[cfg(not(feature = "std"))] @@ -69,7 +70,7 @@ impl OnNewRoot for () { } /// A MMR proof data for one of the leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] pub struct Proof { /// The index of the leaf the proof is for. pub leaf_index: LeafIndex, @@ -352,7 +353,7 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); /// A MMR proof data for a group of leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] pub struct BatchProof { /// The indices of the leaves the proof is for. 
pub leaf_indices: Vec, From 4cd3248c6ac9e29a45e2b52b92859cbe12769500 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Oct 2022 16:45:56 +0200 Subject: [PATCH 42/75] Don't send back empty proofs if light request fails (#12372) --- .../src/light_client_requests/handler.rs | 50 ++++++++----------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 727a9b0d7e820..5efdc3ff6a18b 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -28,7 +28,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::PeerId; use log::{debug, trace}; use prost::Message; -use sc_client_api::{BlockBackend, ProofProvider, StorageProof}; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -176,12 +176,15 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = + let response = match self .client .execution_proof(&BlockId::Hash(block), &request.method, &request.data) { - Ok((_, proof)) => proof, + Ok((_, proof)) => { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteCallResponse(r)) + }, Err(e) => { trace!( "remote call request from {} ({} at {:?}) failed with: {}", @@ -190,16 +193,11 @@ where request.block, e, ); - StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteCallResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } fn on_remote_read_request( @@ -221,11 +219,14 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self + let response = match self .client .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => proof, + Ok(proof) => { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteReadResponse(r)) + }, Err(error) => { trace!( "remote read request from {} ({} at {:?}) failed with: {}", @@ -234,16 +235,11 @@ where request.block, error, ); - StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } fn on_remote_read_child_request( @@ -271,14 +267,17 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err(sp_blockchain::Error::InvalidChildStorageKey), }; - let proof = match child_info.and_then(|child_info| { + let response = match child_info.and_then(|child_info| { self.client.read_child_proof( &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) }) { - Ok(proof) => proof, + Ok(proof) => { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteReadResponse(r)) + }, Err(error) => { trace!( "remote read child request from {} ({} {} at {:?}) failed with: {}", @@ -288,16 +287,11 @@ where request.block, error, ); - 
StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } } From 87224cf2cdafbacd0acac33c43a1063c02e02147 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:11:05 +0200 Subject: [PATCH 43/75] Implement `Clone` and `Default` for `Config` (#12397) * Implement `Clone` and `Default` for `Config` * `cargo fmt` * Remove default config implementation --- client/executor/wasmtime/src/runtime.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 5925a1792aef2..e3509351022bc 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -544,6 +544,7 @@ pub struct Semantics { pub max_memory_size: Option, } +#[derive(Clone)] pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is From eefba93cf62ff80e1011dbe1cd3a543b711f7bb9 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 5 Oct 2022 19:21:37 +0100 Subject: [PATCH 44/75] Bound uses of `Call` (#11649) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Introduce preimages module in traits * Multisize Preimages * Len not actually necessary * Tweaks to the preimage API * Fixes * Get Scheduler building with new API * Scheduler tests pass * Bounded Scheduler 🎉 * Use Agenda holes and introduce IncompleteSince to avoid need to reschedule * Tests pass with new weight system * New benchmarks * Add missing file * Drop preimage when permenantly overeight * Drop preimage when permenantly overeight * Referenda uses latest preimage API * Testing ok * Adding tests Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Add preimage migration Signed-off-by: Oliver Tale-Yazdi * Docs * Remove dbg * Refactor Democracy * Refactor Democracy * Add final MEL * Remove silly maps * Fixes * Minor refactor * Formatting * Fixes * Fixes * Fixes * Update frame/preimage/src/lib.rs Co-authored-by: Shawn Tabrizi * Add migrations to Democracy * WIP Signed-off-by: Oliver Tale-Yazdi * Resolve conflicts Signed-off-by: Oliver Tale-Yazdi * Revert "Resolve conflicts" This reverts commit a89cd0a073a7aabbb8ce6bcb12d4687717aac60e. * Undo wrong resolves... Signed-off-by: Oliver Tale-Yazdi * WIP Signed-off-by: Oliver Tale-Yazdi * Make compile Signed-off-by: Oliver Tale-Yazdi * massage clippy Signed-off-by: Oliver Tale-Yazdi * More clippy Signed-off-by: Oliver Tale-Yazdi * clippy annoyance Signed-off-by: Oliver Tale-Yazdi * clippy annoyance Signed-off-by: Oliver Tale-Yazdi * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi * add missing file * Test Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Clippy harassment Signed-off-by: Oliver Tale-Yazdi * Add test Signed-off-by: Oliver Tale-Yazdi * clippy Signed-off-by: Oliver Tale-Yazdi * Fixup tests Signed-off-by: Oliver Tale-Yazdi * Remove old stuff Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Test trait functions Signed-off-by: Oliver Tale-Yazdi * Update pallet-ui tests Why is this needed? 
Should not be the case unless master is broken... Signed-off-by: Oliver Tale-Yazdi * More scheduler trait test Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Apply review suggestion Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * Add Scheduler test migration_v3_to_v4_works Signed-off-by: Oliver Tale-Yazdi * Merge fixup Signed-off-by: Oliver Tale-Yazdi * Keep referenda benchmarks instantiatable Signed-off-by: Oliver Tale-Yazdi * Update weights Signed-off-by: Oliver Tale-Yazdi * Use new scheduler weight functions Signed-off-by: Oliver Tale-Yazdi * Use new democracy weight functions Signed-off-by: Oliver Tale-Yazdi * Use weight compare functions Signed-off-by: Oliver Tale-Yazdi * Update pallet-ui tests Signed-off-by: Oliver Tale-Yazdi * More renaming… Signed-off-by: Oliver Tale-Yazdi * More renaming… Signed-off-by: Oliver Tale-Yazdi * Add comment Signed-off-by: Oliver Tale-Yazdi * Implement OnRuntimeUpgrade for scheduler::v3_to_v4 migration Put the migration into a proper `MigrateToV4` struct and implement the OnRuntimeUpgrade hooks for it. Also move the test to use that instead. This should make it easier for adding it to Polkadot. Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Handle undecodable Agendas Signed-off-by: Oliver Tale-Yazdi * Remove trash Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade functions Signed-off-by: Oliver Tale-Yazdi * fix test Signed-off-by: Oliver Tale-Yazdi * Fix BoundedSlice::truncate_from Co-authored-by: jakoblell Signed-off-by: Oliver Tale-Yazdi * Fix pre_upgrade hook return values Signed-off-by: Oliver Tale-Yazdi * Add more error logging Signed-off-by: Oliver Tale-Yazdi * Find too large preimages in the pre_upgrade hook Signed-off-by: Oliver Tale-Yazdi * Test that too large Calls in agendas are ignored Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade hooks Why did the CI not catch this?! Signed-off-by: Oliver Tale-Yazdi * works fine - just more logs Signed-off-by: Oliver Tale-Yazdi * Fix staking migration Causing issues on Kusama... Signed-off-by: Oliver Tale-Yazdi * Fix UI tests No idea why this is needed. This is actually undoing an earlier change. Maybe the CI has different rustc versions!? 
Signed-off-by: Oliver Tale-Yazdi * Remove multisig's Calls (#12072) * Remove multisig's Calls * Multisig: Fix tests and re-introduce reserve logic (#12241) * Fix tests and re-introduce reserve logic * fix benches * add todo * remove irrelevant bench * [Feature] Add a migration that drains and refunds stored calls (#12313) * [Feature] Add a migration that drains and refunds stored calls * migration fixes * fixes * address review comments * consume the whole block weight * fix assertions * license header * fix interface Co-authored-by: parity-processbot <> Co-authored-by: parity-processbot <> Co-authored-by: Roman Useinov * Fix test Signed-off-by: Oliver Tale-Yazdi * Fix multisig benchmarks Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_democracy * ".git/.scripts/bench-bot.sh" pallet dev pallet_scheduler * ".git/.scripts/bench-bot.sh" pallet dev pallet_preimage Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Shawn Tabrizi Co-authored-by: parity-processbot <> Co-authored-by: Roman Useinov --- Cargo.lock | 4 + bin/node/runtime/src/lib.rs | 16 +- frame/bounties/src/lib.rs | 2 +- frame/contracts/src/storage.rs | 2 +- frame/conviction-voting/src/lib.rs | 2 +- frame/democracy/Cargo.toml | 7 +- frame/democracy/src/benchmarking.rs | 394 ++--- frame/democracy/src/conviction.rs | 16 +- frame/democracy/src/lib.rs | 746 +++------ frame/democracy/src/migrations.rs | 236 +++ frame/democracy/src/tests.rs | 76 +- frame/democracy/src/tests/cancellation.rs | 30 +- frame/democracy/src/tests/decoders.rs | 40 +- frame/democracy/src/tests/delegation.rs | 8 +- .../democracy/src/tests/external_proposing.rs | 104 +- frame/democracy/src/tests/fast_tracking.rs | 27 +- frame/democracy/src/tests/lock_voting.rs | 36 +- frame/democracy/src/tests/preimage.rs | 237 --- frame/democracy/src/tests/public_proposals.rs | 61 +- frame/democracy/src/tests/scheduling.rs | 35 +- frame/democracy/src/tests/voting.rs | 10 +- frame/democracy/src/types.rs | 28 +- frame/democracy/src/vote.rs | 48 +- frame/democracy/src/vote_threshold.rs | 6 +- frame/democracy/src/weights.rs | 346 ++-- frame/identity/src/tests.rs | 2 +- frame/multisig/Cargo.toml | 3 + frame/multisig/src/benchmarking.rs | 124 +- frame/multisig/src/lib.rs | 148 +- frame/multisig/src/migrations.rs | 86 + frame/multisig/src/tests.rs | 293 +--- frame/nicks/src/lib.rs | 4 +- frame/preimage/Cargo.toml | 6 +- frame/preimage/src/benchmarking.rs | 26 +- frame/preimage/src/lib.rs | 237 ++- frame/preimage/src/migration.rs | 263 +++ frame/preimage/src/mock.rs | 1 - frame/preimage/src/tests.rs | 279 +++- frame/preimage/src/weights.rs | 124 +- frame/recovery/src/lib.rs | 4 +- frame/referenda/src/benchmarking.rs | 16 +- frame/referenda/src/lib.rs | 104 +- frame/referenda/src/mock.rs | 18 +- frame/referenda/src/tests.rs | 32 +- frame/referenda/src/types.rs | 22 +- frame/scheduler/src/benchmarking.rs | 323 ++-- frame/scheduler/src/lib.rs | 984 +++++++---- frame/scheduler/src/migration.rs | 402 +++++ frame/scheduler/src/mock.rs | 56 +- frame/scheduler/src/tests.rs | 1482 ++++++++++++----- frame/scheduler/src/weights.rs | 316 ++-- .../src/construct_runtime/expand/origin.rs | 27 +- frame/support/src/dispatch.rs | 21 +- frame/support/src/traits.rs | 15 +- frame/support/src/traits/dispatch.rs | 38 +- frame/support/src/traits/misc.rs | 2 +- frame/support/src/traits/preimages.rs | 317 ++++ frame/support/src/traits/schedule.rs | 99 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- 
...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/lib.rs | 5 +- frame/whitelist/src/mock.rs | 1 - primitives/core/src/bounded/bounded_vec.rs | 30 +- primitives/runtime/src/lib.rs | 55 +- test-utils/runtime/src/lib.rs | 35 +- 67 files changed, 5067 insertions(+), 3466 deletions(-) create mode 100644 frame/democracy/src/migrations.rs delete mode 100644 frame/democracy/src/tests/preimage.rs create mode 100644 frame/multisig/src/migrations.rs create mode 100644 frame/preimage/src/migration.rs create mode 100644 frame/scheduler/src/migration.rs create mode 100644 frame/support/src/traits/preimages.rs diff --git a/Cargo.lock b/Cargo.lock index 647511373cc31..53f370a930626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5581,7 +5581,9 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", + "pallet-preimage", "pallet-scheduler", "parity-scale-codec", "scale-info", @@ -5899,6 +5901,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6065,6 +6068,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "scale-info", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4898312f9608f..d10448cc2d183 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -340,8 +340,6 @@ impl pallet_proxy::Config for Runtime { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; - // Retry a scheduled item every 10 blocks (1 minute) until the preimage exists. - pub const NoPreimagePostponement: Option = Some(10); } impl pallet_scheduler::Config for Runtime { @@ -351,11 +349,10 @@ impl pallet_scheduler::Config for Runtime { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = ConstU32<50>; + type MaxScheduledPerBlock = ConstU32<512>; type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = NoPreimagePostponement; + type Preimages = Preimage; } parameter_types! { @@ -370,7 +367,6 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = PreimageMaxSize; type BaseDeposit = PreimageBaseDeposit; type ByteDeposit = PreimageByteDeposit; } @@ -862,6 +858,7 @@ impl pallet_referenda::Config for Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; + type Preimages = Preimage; } impl pallet_referenda::Config for Runtime { @@ -881,6 +878,7 @@ impl pallet_referenda::Config for Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; + type Preimages = Preimage; } impl pallet_ranked_collective::Config for Runtime { @@ -909,7 +907,6 @@ parameter_types! { } impl pallet_democracy::Config for Runtime { - type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type Currency = Balances; type EnactmentPeriod = EnactmentPeriod; @@ -949,14 +946,15 @@ impl pallet_democracy::Config for Runtime { // only do it once and it lasts only for the cool-off period. 
type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; - type PreimageByteDeposit = PreimageByteDeposit; - type OperationalPreimageOrigin = pallet_collective::EnsureMember; type Slash = Treasury; type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = ConstU32<100>; type WeightInfo = pallet_democracy::weights::SubstrateWeight; type MaxProposals = MaxProposals; + type Preimages = Preimage; + type MaxDeposits = ConstU32<100>; + type MaxBlacklisted = ConstU32<100>; } parameter_types! { diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index b95940a2835ce..d947226f87fa0 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { value: BalanceOf, ) -> DispatchResult { let bounded_description: BoundedVec<_, _> = - description.try_into().map_err(|()| Error::::ReasonTooBig)?; + description.try_into().map_err(|_| Error::::ReasonTooBig)?; ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); let index = Self::bounty_count(); diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index c3fc0840d8649..cf10c3225c920 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -340,7 +340,7 @@ where let queue: Vec = (0..T::DeletionQueueDepth::get()) .map(|_| DeletedContract { trie_id: TrieId::default() }) .collect(); - let bounded: BoundedVec<_, _> = queue.try_into().unwrap(); + let bounded: BoundedVec<_, _> = queue.try_into().map_err(|_| ()).unwrap(); >::put(bounded); } } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 534941d6f7f66..b876a9354ee59 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -400,7 +400,7 @@ impl, I: 'static> Pallet { Err(i) => { votes .try_insert(i, (poll_index, vote)) - .map_err(|()| Error::::MaxVotesReached)?; + .map_err(|_| Error::::MaxVotesReached)?; }, } // Shouldn't be possible to fail, but we handle it gracefully. 
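To make the new preimage flow concrete before the democracy changes below: a
rough sketch, not part of this patch, of how a pallet whose `Config` declares
`type Preimages: QueryPreimage + StorePreimage` (as scheduler above and
democracy below now do) bounds a call, takes its hash, and looks it back up.
The `bound_roundtrip` helper and its `From` bound are illustrative only:

    use frame_support::traits::{QueryPreimage, StorePreimage};
    use sp_runtime::DispatchError;

    fn bound_roundtrip<T: pallet_democracy::Config>() -> Result<(), DispatchError>
    where
        pallet_democracy::CallOf<T>: From<frame_system::Call<T>>,
    {
        // Bound a call: small calls are stored inline, larger ones are hashed
        // and their preimage stored via the configured `Preimages` provider.
        let call: pallet_democracy::CallOf<T> =
            frame_system::Call::remark { remark: vec![42] }.into();
        let bounded = T::Preimages::bound(call)?;

        // `Bounded` exposes a stable `H256` hash; storage such as `Blacklist`
        // and `Cancellations` now key on it.
        let _hash = bounded.hash();

        // Look the call back up again, as the scheduler does at dispatch time.
        let (_call, _len) = T::Preimages::peek(&bounded)?;
        Ok(())
    }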
diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index f0ab3162c892b..e50d39ff76902 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -24,11 +24,13 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } +log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } -sp-core = { version = "6.0.0", path = "../../primitives/core" } +pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } [features] default = ["std"] @@ -42,6 +44,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -49,4 +52,4 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = ["frame-support/try-runtime",] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index ab7ee3331e319..424192e2521da 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -22,24 +22,16 @@ use super::*; use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_noop, assert_ok, - codec::Decode, - traits::{ - schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, - }, + traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, }; -use frame_system::{Pallet as System, RawOrigin}; -use sp_runtime::traits::{BadOrigin, Bounded, One}; +use frame_system::RawOrigin; +use sp_core::H256; +use sp_runtime::{traits::Bounded, BoundedVec}; use crate::Pallet as Democracy; +const REFERENDUM_COUNT_HINT: u32 = 10; const SEED: u32 = 0; -const MAX_REFERENDUMS: u32 = 99; -const MAX_SECONDERS: u32 = 100; -const MAX_BYTES: u32 = 16_384; - -fn assert_last_event(generic_event: ::RuntimeEvent) { - frame_system::Pallet::::assert_last_event(generic_event.into()); -} fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); @@ -49,37 +41,32 @@ fn funded_account(name: &'static str, index: u32) -> T::AccountId { caller } -fn add_proposal(n: u32) -> Result { +fn make_proposal(n: u32) -> BoundedCallOf { + let call: CallOf = frame_system::Call::remark { remark: n.encode() }.into(); + ::Preimages::bound(call).unwrap() +} + +fn add_proposal(n: u32) -> Result { let other = funded_account::("proposer", n); let value = T::MinimumDeposit::get(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&n); - - Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value)?; - - Ok(proposal_hash) + let proposal = make_proposal::(n); + Democracy::::propose(RawOrigin::Signed(other).into(), proposal.clone(), value)?; + Ok(proposal.hash()) } -fn add_referendum(n: u32) -> Result { - let proposal_hash: T::Hash = T::Hashing::hash_of(&n); +fn add_referendum(n: u32) -> (ReferendumIndex, H256) { let vote_threshold = VoteThreshold::SimpleMajority; - - Democracy::::inject_referendum( - 
T::LaunchPeriod::get(), - proposal_hash, - vote_threshold, - 0u32.into(), - ); - let referendum_index: ReferendumIndex = ReferendumCount::::get() - 1; - T::Scheduler::schedule_named( - (DEMOCRACY_ID, referendum_index).encode(), - DispatchTime::At(2u32.into()), - None, - 63, - frame_system::RawOrigin::Root.into(), - Call::enact_proposal { proposal_hash, index: referendum_index }.into(), + let proposal = make_proposal::(n); + let hash = proposal.hash(); + ( + Democracy::::inject_referendum( + T::LaunchPeriod::get(), + proposal, + vote_threshold, + 0u32.into(), + ), + hash, ) - .map_err(|_| "failed to schedule named")?; - Ok(referendum_index) } fn account_vote(b: BalanceOf) -> AccountVote> { @@ -97,95 +84,90 @@ benchmarks! { } let caller = funded_account::("caller", 0); - let proposal_hash: T::Hash = T::Hashing::hash_of(&0); + let proposal = make_proposal::(0); let value = T::MinimumDeposit::get(); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash, value) + }: _(RawOrigin::Signed(caller), proposal, value) verify { assert_eq!(Democracy::::public_props().len(), p as usize, "Proposals not created."); } second { - let s in 0 .. MAX_SECONDERS; - let caller = funded_account::("caller", 0); - let proposal_hash = add_proposal::(s)?; + add_proposal::(0)?; // Create s existing "seconds" - for i in 0 .. s { + // we must reserve one deposit for the `proposal` and one for our benchmarked `second` call. + for i in 0 .. T::MaxDeposits::get() - 2 { let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::MAX)?; + Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; } let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (s + 1) as usize, "Seconds not recorded"); + assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0, u32::MAX) + }: _(RawOrigin::Signed(caller), 0) verify { let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (s + 2) as usize, "`second` benchmark did not work"); + assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); } vote_new { - let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + for i in 0 .. T::MaxVotes::get() - 1 { + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), r as usize, "Votes were not recorded."); + assert_eq!(votes.len(), (T::MaxVotes::get() - 1) as usize, "Votes were not recorded."); - let referendum_index = add_referendum::(r)?; + let ref_index = add_referendum::(T::MaxVotes::get() - 1).0; whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), referendum_index, account_vote) + }: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Vote was not recorded."); + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded."); } vote_existing { - let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0 ..=r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + for i in 0..T::MaxVotes::get() { + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Votes were not recorded."); + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Votes were not recorded."); // Change vote from aye to nay let nay = Vote { aye: false, conviction: Conviction::Locked1x }; let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() }; - let referendum_index = Democracy::::referendum_count() - 1; + let ref_index = Democracy::::referendum_count() - 1; // This tests when a user changes a vote whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), referendum_index, new_vote) + }: vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Vote was incorrectly added"); - let referendum_info = Democracy::::referendum_info(referendum_index) + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); + let referendum_info = Democracy::::referendum_info(ref_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, @@ -196,61 +178,55 @@ benchmarks! { emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); - let referendum_index = add_referendum::(0)?; - assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, referendum_index) + let ref_index = add_referendum::(0).0; + assert_ok!(Democracy::::referendum_status(ref_index)); + }: _(origin, ref_index) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(referendum_index), + Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid, ); } blacklist { - let p in 1 .. T::MaxProposals::get(); - // Place our proposal at the end to make sure it's worst case. - for i in 0 .. p - 1 { + for i in 0 .. T::MaxProposals::get() - 1 { add_proposal::(i)?; } // We should really add a lot of seconds here, but we're not doing it elsewhere. + // Add a referendum of our proposal. + let (ref_index, hash) = add_referendum::(0); + assert_ok!(Democracy::::referendum_status(ref_index)); // Place our proposal in the external queue, too. - let hash = T::Hashing::hash_of(&0); assert_ok!( - Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash) + Democracy::::external_propose(T::ExternalOrigin::successful_origin(), make_proposal::(0)) ); let origin = T::BlacklistOrigin::successful_origin(); - // Add a referendum of our proposal. 
- let referendum_index = add_referendum::(0)?; - assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, hash, Some(referendum_index)) + }: _(origin, hash, Some(ref_index)) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(referendum_index), + Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid ); } // Worst case scenario, we external propose a previously blacklisted proposal external_propose { - let v in 1 .. MAX_VETOERS as u32; - let origin = T::ExternalOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); + let proposal = make_proposal::(0); // Add proposal to blacklist with block number 0 - let addresses = (0..v) + let addresses: BoundedVec<_, _> = (0..(T::MaxBlacklisted::get() - 1)) .into_iter() .map(|i| account::("blacklist", i, SEED)) - .collect::>(); - Blacklist::::insert( - proposal_hash, - (T::BlockNumber::zero(), addresses), - ); - }: _(origin, proposal_hash) + .collect::>() + .try_into() + .unwrap(); + Blacklist::::insert(proposal.hash(), (T::BlockNumber::zero(), addresses)); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -258,8 +234,8 @@ benchmarks! { external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + let proposal = make_proposal::(0); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -267,8 +243,8 @@ benchmarks! { external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + let proposal = make_proposal::(0); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -276,8 +252,9 @@ benchmarks! { fast_track { let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&0); - Democracy::::external_propose_default(origin_propose, proposal_hash)?; + let proposal = make_proposal::(0); + let proposal_hash = proposal.hash(); + Democracy::::external_propose_default(origin_propose, proposal)?; // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. let origin_fast_track = T::FastTrackOrigin::successful_origin(); @@ -289,17 +266,15 @@ benchmarks! { } veto_external { - // Existing veto-ers - let v in 0 .. MAX_VETOERS as u32; - - let proposal_hash: T::Hash = T::Hashing::hash_of(&v); + let proposal = make_proposal::(0); + let proposal_hash = proposal.hash(); let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - Democracy::::external_propose_default(origin_propose, proposal_hash)?; + Democracy::::external_propose_default(origin_propose, proposal)?; - let mut vetoers: Vec = Vec::new(); - for i in 0 .. v { - vetoers.push(account::("vetoer", i, SEED)); + let mut vetoers: BoundedVec = Default::default(); + for i in 0 .. (T::MaxBlacklisted::get() - 1) { + vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); } vetoers.sort(); Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); @@ -310,42 +285,27 @@ benchmarks! 
{ verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; - assert_eq!(new_vetoers.len(), (v + 1) as usize, "vetoers not added"); + assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); } cancel_proposal { - let p in 1 .. T::MaxProposals::get(); - // Place our proposal at the end to make sure it's worst case. - for i in 0 .. p { + for i in 0 .. T::MaxProposals::get() { add_proposal::(i)?; } - let cancel_origin = T::CancelProposalOrigin::successful_origin(); }: _(cancel_origin, 0) cancel_referendum { - let referendum_index = add_referendum::(0)?; - }: _(RawOrigin::Root, referendum_index) - - cancel_queued { - let r in 1 .. MAX_REFERENDUMS; - - for i in 0..r { - add_referendum::(i)?; // This add one element in the scheduler - } - - let referendum_index = add_referendum::(r)?; - }: _(RawOrigin::Root, referendum_index) + let ref_index = add_referendum::(0).0; + }: _(RawOrigin::Root, ref_index) - // This measures the path of `launch_next` external. Not currently used as we simply - // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_external { - let r in 0 .. MAX_REFERENDUMS; + let r in 0 .. REFERENDUM_COUNT_HINT; for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -354,8 +314,8 @@ benchmarks! { LastTabledWasExternal::::put(false); let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&r); - let call = Call::::external_propose_majority { proposal_hash }; + let proposal = make_proposal::(r); + let call = Call::::external_propose_majority { proposal }; call.dispatch_bypass_filter(origin)?; // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -379,14 +339,12 @@ benchmarks! { } } - // This measures the path of `launch_next` public. Not currently used as we simply - // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_public { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -415,10 +373,10 @@ benchmarks! { // No launch no maturing referenda. on_initialize_base { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -445,10 +403,10 @@ benchmarks! { } on_initialize_base_with_launch_period { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -477,7 +435,7 @@ benchmarks! { } delegate { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -504,8 +462,8 @@ benchmarks! 
{ let account_vote = account_vote::(initial_balance); // We need to create existing direct votes for the `new_delegate` for i in 0..r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. } => votes, @@ -529,7 +487,7 @@ benchmarks! { } undelegate { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -553,10 +511,10 @@ benchmarks! { // We need to create votes direct votes for the `delegate` let account_vote = account_vote::(initial_balance); for i in 0..r { - let ref_idx = add_referendum::(i)?; + let ref_index = add_referendum::(i).0; Democracy::::vote( RawOrigin::Signed(the_delegate.clone()).into(), - ref_idx, + ref_index, account_vote )?; } @@ -580,71 +538,9 @@ benchmarks! { }: _(RawOrigin::Root) - note_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let caller = funded_account::("caller", 0); - let encoded_proposal = vec![1; b as usize]; - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - } - - note_imminent_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - // d + 1 to include the one we are testing - let encoded_proposal = vec![1; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - let block_number = T::BlockNumber::one(); - Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); - - let caller = funded_account::("caller", 0); - let encoded_proposal = vec![1; b as usize]; - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - } - - reap_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let encoded_proposal = vec![1; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - - let submitter = funded_account::("submitter", b); - Democracy::::note_preimage(RawOrigin::Signed(submitter).into(), encoded_proposal.clone())?; - - // We need to set this otherwise we get `Early` error. - let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); - System::::set_block_number(block_number); - - assert!(Preimages::::contains_key(proposal_hash)); - - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash, u32::MAX) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - assert!(!Preimages::::contains_key(proposal_hash)); - } - // Test when unlock will remove locks unlock_remove { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); @@ -653,9 +549,9 @@ benchmarks! 
{ let small_vote = account_vote::(base_balance); // Vote and immediately unvote for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_idx)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; } let caller = funded_account::("caller", 0); @@ -669,7 +565,7 @@ benchmarks! { // Test when unlock will set a new value unlock_set { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); @@ -677,14 +573,14 @@ benchmarks! { let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; } // Create a big vote so lock increases let big_vote = account_vote::(base_balance * 10u32.into()); - let referendum_index = add_referendum::(r)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), referendum_index, big_vote)?; + let ref_index = add_referendum::(r).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, big_vote)?; let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, @@ -695,7 +591,7 @@ benchmarks! { let voting = VotingOf::::get(&locker); assert_eq!(voting.locked_balance(), base_balance * 10u32.into()); - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), referendum_index)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; let caller = funded_account::("caller", 0); whitelist_account!(caller); @@ -709,18 +605,18 @@ benchmarks! { let voting = VotingOf::::get(&locker); // Note that we may want to add a `get_lock` api to actually verify - assert_eq!(voting.locked_balance(), base_balance); + assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() }); } remove_vote { - let r in 1 .. MAX_REFERENDUMS; + let r in 1 .. T::MaxVotes::get(); let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -729,9 +625,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let referendum_index = r - 1; + let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), referendum_index) + }: _(RawOrigin::Signed(caller.clone()), ref_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -742,15 +638,15 @@ benchmarks! { // Worst case is when target == caller and referendum is ongoing remove_other_vote { - let r in 1 .. MAX_REFERENDUMS; + let r in 1 .. 
T::MaxVotes::get(); let caller = funded_account::("caller", r); let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -759,9 +655,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let referendum_index = r - 1; + let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), caller_lookup, referendum_index) + }: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -770,54 +666,6 @@ benchmarks! { assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } - #[extra] - enact_proposal_execute { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let proposer = funded_account::("proposer", 0); - let raw_call = Call::note_preimage { encoded_proposal: vec![1; b as usize] }; - let generic_call: T::Proposal = raw_call.into(); - let encoded_proposal = generic_call.encode(); - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; - - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - }: enact_proposal(RawOrigin::Root, proposal_hash, 0) - verify { - // Fails due to mismatched origin - assert_last_event::(Event::::Executed { ref_index: 0, result: Err(BadOrigin.into()) }.into()); - } - - #[extra] - enact_proposal_slash { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let proposer = funded_account::("proposer", 0); - // Random invalid bytes - let encoded_proposal = vec![200; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; - - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - let origin = RawOrigin::Root.into(); - let call = Call::::enact_proposal { proposal_hash, index: 0 }.encode(); - }: { - assert_eq!( - as Decode>::decode(&mut &*call) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(origin), - Err(Error::::PreimageInvalid.into()) - ); - } - impl_benchmark_test_suite!( Democracy, crate::tests::new_test_ext(), diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index 57d631e8c1f4c..a938d8a4e6852 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -18,7 +18,7 @@ //! The conviction datatype. use crate::types::Delegations; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedDiv, CheckedMul, Zero}, @@ -27,7 +27,19 @@ use sp_runtime::{ use sp_std::{prelude::*, result::Result}; /// A value denoting the strength of conviction of a vote. 
-#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo)] +#[derive( + Encode, + MaxEncodedLen, + Decode, + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + TypeInfo, +)] pub enum Conviction { /// 0.1x votes, unlocked. None, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 3c1be19103998..cf954d4800eee 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -152,21 +152,20 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, Input}; +use codec::{Decode, Encode}; use frame_support::{ ensure, traits::{ defensive_prelude::*, - schedule::{DispatchTime, Named as ScheduleNamed}, - BalanceStatus, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, - ReservableCurrency, WithdrawReasons, + schedule::{v3::Named as ScheduleNamed, DispatchTime}, + Bounded, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, QueryPreimage, + ReservableCurrency, StorePreimage, WithdrawReasons, }, weights::Weight, }; -use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, Dispatchable, Hash, Saturating, StaticLookup, Zero}, - ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, + traits::{Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, + ArithmeticError, DispatchError, DispatchResult, }; use sp_std::prelude::*; @@ -188,12 +187,9 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -const DEMOCRACY_ID: LockIdentifier = *b"democrac"; +pub mod migrations; -/// The maximum number of vetoers on a single proposal used to compute Weight. -/// -/// NOTE: This is not enforced by any logic. -pub const MAX_VETOERS: u32 = 100; +const DEMOCRACY_ID: LockIdentifier = *b"democrac"; /// A proposal index. pub type PropIndex = u32; @@ -206,58 +202,36 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +pub type CallOf = ::RuntimeCall; +pub type BoundedCallOf = Bounded>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum PreimageStatus { - /// The preimage is imminently needed at the argument. - Missing(BlockNumber), - /// The preimage is available. - Available { - data: Vec, - provider: AccountId, - deposit: Balance, - since: BlockNumber, - /// None if it's not imminent. - expiry: Option, - }, -} - -impl PreimageStatus { - fn to_missing_expiry(self) -> Option { - match self { - PreimageStatus::Missing(expiry) => Some(expiry), - _ => None, - } - } -} - -// A value placed in storage that represents the current version of the Democracy storage. -// This value is used by the `on_runtime_upgrade` logic to determine whether we run -// storage migration logic. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] -enum Releases { - V1, -} - #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_core::H256; + + /// The current storage version. 
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + Sized { - type Proposal: Parameter - + Dispatchable - + From>; + type WeightInfo: WeightInfo; type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The Scheduler. + type Scheduler: ScheduleNamed, Self::PalletsOrigin>; + + /// The Preimage provider. + type Preimages: QueryPreimage + StorePreimage; + /// Currency type for this pallet. type Currency: ReservableCurrency + LockableCurrency; @@ -289,6 +263,39 @@ pub mod pallet { #[pallet::constant] type MinimumDeposit: Get>; + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may + /// want to set this permanently to `false`, others may want to condition it on things such + /// as an upgrade having happened recently. + #[pallet::constant] + type InstantAllowed: Get; + + /// Minimum voting period allowed for a fast-track referendum. + #[pallet::constant] + type FastTrackVotingPeriod: Get; + + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + #[pallet::constant] + type CooloffPeriod: Get; + + /// The maximum number of votes for an account. + /// + /// Also used to compute weight, an overly big value can + /// lead to extrinsic with very big weight: see `delegate` for instance. + #[pallet::constant] + type MaxVotes: Get; + + /// The maximum number of public proposals that can exist at any time. + #[pallet::constant] + type MaxProposals: Get; + + /// The maximum number of deposits a public proposal may have at any time. + #[pallet::constant] + type MaxDeposits: Get; + + /// The maximum number of items which can be blacklisted. + #[pallet::constant] + type MaxBlacklisted: Get; + /// Origin from which the next tabled referendum may be forced. This is a normal /// "super-majority-required" referendum. type ExternalOrigin: EnsureOrigin; @@ -311,16 +318,6 @@ pub mod pallet { /// origin. It retains its threshold method. type InstantOrigin: EnsureOrigin; - /// Indicator for whether an emergency origin is even allowed to happen. Some chains may - /// want to set this permanently to `false`, others may want to condition it on things such - /// as an upgrade having happened recently. - #[pallet::constant] - type InstantAllowed: Get; - - /// Minimum voting period allowed for a fast-track referendum. - #[pallet::constant] - type FastTrackVotingPeriod: Get; - /// Origin from which any referendum may be cancelled in an emergency. type CancellationOrigin: EnsureOrigin; @@ -331,79 +328,39 @@ pub mod pallet { type CancelProposalOrigin: EnsureOrigin; /// Origin for anyone able to veto proposals. - /// - /// # Warning - /// - /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to - /// [MAX_VETOERS](./const.MAX_VETOERS.html) type VetoOrigin: EnsureOrigin; - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - #[pallet::constant] - type CooloffPeriod: Get; - - /// The amount of balance that must be deposited per byte of preimage stored. - #[pallet::constant] - type PreimageByteDeposit: Get>; - - /// An origin that can provide a preimage using operational extrinsics. - type OperationalPreimageOrigin: EnsureOrigin; - - /// Handler for the unbalanced reduction when slashing a preimage deposit. 
- type Slash: OnUnbalanced>; - - /// The Scheduler. - type Scheduler: ScheduleNamed; - /// Overarching type of all pallets origins. type PalletsOrigin: From>; - /// The maximum number of votes for an account. - /// - /// Also used to compute weight, an overly big value can - /// lead to extrinsic with very big weight: see `delegate` for instance. - #[pallet::constant] - type MaxVotes: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// The maximum number of public proposals that can exist at any time. - #[pallet::constant] - type MaxProposals: Get; + /// Handler for the unbalanced reduction when slashing a preimage deposit. + type Slash: OnUnbalanced>; } - // TODO: Refactor public proposal queue into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. #[pallet::storage] #[pallet::getter(fn public_prop_count)] pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; - /// The public proposals. Unsorted. The second item is the proposal's hash. + /// The public proposals. Unsorted. The second item is the proposal. #[pallet::storage] #[pallet::getter(fn public_props)] - pub type PublicProps = - StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; + pub type PublicProps = StorageValue< + _, + BoundedVec<(PropIndex, BoundedCallOf, T::AccountId), T::MaxProposals>, + ValueQuery, + >; /// Those who have locked a deposit. /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] #[pallet::getter(fn deposit_of)] - pub type DepositOf = - StorageMap<_, Twox64Concat, PropIndex, (Vec, BalanceOf)>; - - /// Map of hashes to the proposal preimage, along with who registered it and their deposit. - /// The block number is the block at which it was deposited. - // TODO: Refactor Preimages into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - #[pallet::storage] - pub type Preimages = StorageMap< + pub type DepositOf = StorageMap< _, - Identity, - T::Hash, - PreimageStatus, T::BlockNumber>, + Twox64Concat, + PropIndex, + (BoundedVec, BalanceOf), >; /// The next free referendum index, aka the number of referenda started so far. @@ -426,7 +383,7 @@ pub mod pallet { _, Twox64Concat, ReferendumIndex, - ReferendumInfo>, + ReferendumInfo, BalanceOf>, >; /// All votes for a particular voter. We store the balance for the number of votes that we @@ -438,14 +395,12 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - Voting, T::AccountId, T::BlockNumber>, + Voting, T::AccountId, T::BlockNumber, T::MaxVotes>, ValueQuery, >; /// True if the last referendum tabled was submitted externally. False if it was a public /// proposal. - // TODO: There should be any number of tabling origins, not just public and "external" - // (council). https://github.com/paritytech/substrate/issues/5322 #[pallet::storage] pub type LastTabledWasExternal = StorageValue<_, bool, ValueQuery>; @@ -454,23 +409,21 @@ pub mod pallet { /// - `LastTabledWasExternal` is `false`; or /// - `PublicProps` is empty. #[pallet::storage] - pub type NextExternal = StorageValue<_, (T::Hash, VoteThreshold)>; + pub type NextExternal = StorageValue<_, (BoundedCallOf, VoteThreshold)>; /// A record of who vetoed what. Maps proposal hash to a possible existent block number /// (until when it may not be resubmitted) and who vetoed it. 
#[pallet::storage] - pub type Blacklist = - StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec)>; + pub type Blacklist = StorageMap< + _, + Identity, + H256, + (T::BlockNumber, BoundedVec), + >; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] - pub type Cancellations = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; - - /// Storage version of the pallet. - /// - /// New networks start with last version. - #[pallet::storage] - pub(crate) type StorageVersion = StorageValue<_, Releases>; + pub type Cancellations = StorageMap<_, Identity, H256, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -490,7 +443,6 @@ pub mod pallet { PublicPropCount::::put(0 as PropIndex); ReferendumCount::::put(0 as ReferendumIndex); LowestUnbaked::::put(0 as ReferendumIndex); - StorageVersion::::put(Releases::V1); } } @@ -500,7 +452,7 @@ pub mod pallet { /// A motion has been proposed by a public account. Proposed { proposal_index: PropIndex, deposit: BalanceOf }, /// A public proposal has been tabled for referendum vote. - Tabled { proposal_index: PropIndex, deposit: BalanceOf, depositors: Vec }, + Tabled { proposal_index: PropIndex, deposit: BalanceOf }, /// An external proposal has been tabled. ExternalTabled, /// A referendum has begun. @@ -511,31 +463,14 @@ pub mod pallet { NotPassed { ref_index: ReferendumIndex }, /// A referendum has been cancelled. Cancelled { ref_index: ReferendumIndex }, - /// A proposal has been enacted. - Executed { ref_index: ReferendumIndex, result: DispatchResult }, /// An account has delegated their vote to another account. Delegated { who: T::AccountId, target: T::AccountId }, /// An account has cancelled a previous delegation operation. Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. - Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: T::BlockNumber }, - /// A proposal's preimage was noted, and the deposit taken. - PreimageNoted { proposal_hash: T::Hash, who: T::AccountId, deposit: BalanceOf }, - /// A proposal preimage was removed and used (the deposit was returned). - PreimageUsed { proposal_hash: T::Hash, provider: T::AccountId, deposit: BalanceOf }, - /// A proposal could not be executed because its preimage was invalid. - PreimageInvalid { proposal_hash: T::Hash, ref_index: ReferendumIndex }, - /// A proposal could not be executed because its preimage was missing. - PreimageMissing { proposal_hash: T::Hash, ref_index: ReferendumIndex }, - /// A registered preimage was removed and the deposit collected by the reaper. - PreimageReaped { - proposal_hash: T::Hash, - provider: T::AccountId, - deposit: BalanceOf, - reaper: T::AccountId, - }, + Vetoed { who: T::AccountId, proposal_hash: H256, until: T::BlockNumber }, /// A proposal_hash has been blacklisted permanently. 
- Blacklisted { proposal_hash: T::Hash }, + Blacklisted { proposal_hash: H256 }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, /// An account has secconded a proposal @@ -564,20 +499,8 @@ pub mod pallet { NoProposal, /// Identity may not veto a proposal twice AlreadyVetoed, - /// Preimage already noted - DuplicatePreimage, - /// Not imminent - NotImminent, - /// Too early - TooEarly, - /// Imminent - Imminent, - /// Preimage not found - PreimageMissing, /// Vote given for invalid referendum ReferendumInvalid, - /// Invalid preimage - PreimageInvalid, /// No proposals waiting NoneWaiting, /// The given account did not vote on the referendum. @@ -601,8 +524,8 @@ pub mod pallet { WrongUpperBound, /// Maximum number of votes reached. MaxVotesReached, - /// Maximum number of proposals reached. - TooManyProposals, + /// Maximum number of items reached. + TooMany, /// Voting period too low VotingPeriodLow, } @@ -626,12 +549,10 @@ pub mod pallet { /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). /// /// Emits `Proposed`. - /// - /// Weight: `O(p)` #[pallet::weight(T::WeightInfo::propose())] pub fn propose( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, #[pallet::compact] value: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -640,7 +561,8 @@ pub mod pallet { let index = Self::public_prop_count(); let real_prop_count = PublicProps::::decode_len().unwrap_or(0) as u32; let max_proposals = T::MaxProposals::get(); - ensure!(real_prop_count < max_proposals, Error::::TooManyProposals); + ensure!(real_prop_count < max_proposals, Error::::TooMany); + let proposal_hash = proposal.hash(); if let Some((until, _)) = >::get(proposal_hash) { ensure!( @@ -650,10 +572,14 @@ pub mod pallet { } T::Currency::reserve(&who, value)?; + + let depositors = BoundedVec::<_, T::MaxDeposits>::truncate_from(vec![who.clone()]); + DepositOf::::insert(index, (depositors, value)); + PublicPropCount::::put(index + 1); - >::insert(index, (&[&who][..], value)); - >::append((index, proposal_hash, who)); + PublicProps::::try_append((index, proposal, who)) + .map_err(|_| Error::::TooMany)?; Self::deposit_event(Event::::Proposed { proposal_index: index, deposit: value }); Ok(()) @@ -665,23 +591,19 @@ pub mod pallet { /// must have funds to cover the deposit, equal to the original deposit. /// /// - `proposal`: The index of the proposal to second. - /// - `seconds_upper_bound`: an upper bound on the current number of seconds on this - /// proposal. Extrinsic is weighted according to this value with no refund. - /// - /// Weight: `O(S)` where S is the number of seconds a proposal already has. 
- #[pallet::weight(T::WeightInfo::second(*seconds_upper_bound))] + #[pallet::weight(T::WeightInfo::second())] pub fn second( origin: OriginFor, #[pallet::compact] proposal: PropIndex, - #[pallet::compact] seconds_upper_bound: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; let seconds = Self::len_of_deposit_of(proposal).ok_or(Error::::ProposalMissing)?; - ensure!(seconds <= seconds_upper_bound, Error::::WrongUpperBound); + ensure!(seconds < T::MaxDeposits::get(), Error::::TooMany); let mut deposit = Self::deposit_of(proposal).ok_or(Error::::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; - deposit.0.push(who.clone()); + let ok = deposit.0.try_push(who.clone()).is_ok(); + debug_assert!(ok, "`seconds` is below static limit; `try_insert` should succeed; qed"); >::insert(proposal, deposit); Self::deposit_event(Event::::Seconded { seconder: who, prop_index: proposal }); Ok(()) @@ -694,12 +616,7 @@ pub mod pallet { /// /// - `ref_index`: The index of the referendum to vote for. /// - `vote`: The vote configuration. - /// - /// Weight: `O(R)` where R is the number of referendums the voter has voted on. - #[pallet::weight( - T::WeightInfo::vote_new(T::MaxVotes::get()) - .max(T::WeightInfo::vote_existing(T::MaxVotes::get())) - )] + #[pallet::weight(T::WeightInfo::vote_new().max(T::WeightInfo::vote_existing()))] pub fn vote( origin: OriginFor, #[pallet::compact] ref_index: ReferendumIndex, @@ -725,7 +642,7 @@ pub mod pallet { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; - let h = status.proposal_hash; + let h = status.proposal.hash(); ensure!(!>::contains_key(h), Error::::AlreadyCanceled); >::insert(h, true); @@ -739,20 +656,20 @@ pub mod pallet { /// The dispatch origin of this call must be `ExternalOrigin`. /// /// - `proposal_hash`: The preimage hash of the proposal. - /// - /// Weight: `O(V)` with V number of vetoers in the blacklist of proposal. - /// Decoding vec of length V. 
Charged as maximum - #[pallet::weight(T::WeightInfo::external_propose(MAX_VETOERS))] - pub fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + #[pallet::weight(T::WeightInfo::external_propose())] + pub fn external_propose( + origin: OriginFor, + proposal: BoundedCallOf, + ) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); - if let Some((until, _)) = >::get(proposal_hash) { + if let Some((until, _)) = >::get(proposal.hash()) { ensure!( >::block_number() >= until, Error::::ProposalBlacklisted, ); } - >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); + >::put((proposal, VoteThreshold::SuperMajorityApprove)); Ok(()) } @@ -770,10 +687,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_majority())] pub fn external_propose_majority( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SimpleMajority)); + >::put((proposal, VoteThreshold::SimpleMajority)); Ok(()) } @@ -791,10 +708,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_default())] pub fn external_propose_default( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalDefaultOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); + >::put((proposal, VoteThreshold::SuperMajorityAgainst)); Ok(()) } @@ -805,7 +722,7 @@ pub mod pallet { /// The dispatch of this call must be `FastTrackOrigin`. /// /// - `proposal_hash`: The hash of the current external proposal. - /// - `voting_period`: The period that is allowed for voting on this proposal. + /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to /// Must be always greater than zero. /// For `FastTrackOrigin` must be equal or greater than `FastTrackVotingPeriod`. /// - `delay`: The number of block after voting has ended in approval and this should be @@ -817,7 +734,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor, - proposal_hash: T::Hash, + proposal_hash: H256, voting_period: T::BlockNumber, delay: T::BlockNumber, ) -> DispatchResult { @@ -836,20 +753,21 @@ pub mod pallet { T::InstantOrigin::ensure_origin(ensure_instant)?; ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } + ensure!(voting_period > T::BlockNumber::zero(), Error::::VotingPeriodLow); - let (e_proposal_hash, threshold) = + let (ext_proposal, threshold) = >::get().ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, ); - ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); + ensure!(proposal_hash == ext_proposal.hash(), Error::::InvalidHash); >::kill(); let now = >::block_number(); Self::inject_referendum( now.saturating_add(voting_period), - proposal_hash, + ext_proposal, threshold, delay, ); @@ -865,22 +783,24 @@ pub mod pallet { /// Emits `Vetoed`. 
/// /// Weight: `O(V + log(V))` where V is number of `existing vetoers` - #[pallet::weight(T::WeightInfo::veto_external(MAX_VETOERS))] - pub fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + #[pallet::weight(T::WeightInfo::veto_external())] + pub fn veto_external(origin: OriginFor, proposal_hash: H256) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; - if let Some((e_proposal_hash, _)) = >::get() { - ensure!(proposal_hash == e_proposal_hash, Error::::ProposalMissing); + if let Some((ext_proposal, _)) = NextExternal::::get() { + ensure!(proposal_hash == ext_proposal.hash(), Error::::ProposalMissing); } else { return Err(Error::::NoProposal.into()) } let mut existing_vetoers = - >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_else(Vec::new); + >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); let insert_position = existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; + existing_vetoers + .try_insert(insert_position, who.clone()) + .map_err(|_| Error::::TooMany)?; - existing_vetoers.insert(insert_position, who.clone()); let until = >::block_number().saturating_add(T::CooloffPeriod::get()); >::insert(&proposal_hash, (until, existing_vetoers)); @@ -907,21 +827,6 @@ pub mod pallet { Ok(()) } - /// Cancel a proposal queued for enactment. - /// - /// The dispatch origin of this call must be _Root_. - /// - /// - `which`: The index of the referendum to cancel. - /// - /// Weight: `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. - #[pallet::weight((T::WeightInfo::cancel_queued(10), DispatchClass::Operational))] - pub fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { - ensure_root(origin)?; - T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) - .map_err(|_| Error::::ProposalMissing)?; - Ok(()) - } - /// Delegate the voting power (with some given conviction) of the sending account. /// /// The balance delegated is locked for as long as it's delegated, and thereafter for the @@ -991,135 +896,6 @@ pub mod pallet { Ok(()) } - /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be - /// in the dispatch queue but does require a deposit, returned once enacted. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[pallet::weight(T::WeightInfo::note_preimage(encoded_proposal.len() as u32))] - pub fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { - Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; - Ok(()) - } - - /// Same as `note_preimage` but origin is `OperationalPreimageOrigin`. - #[pallet::weight(( - T::WeightInfo::note_preimage(encoded_proposal.len() as u32), - DispatchClass::Operational, - ))] - pub fn note_preimage_operational( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResult { - let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; - Self::note_preimage_inner(who, encoded_proposal)?; - Ok(()) - } - - /// Register the preimage for an upcoming proposal. This requires the proposal to be - /// in the dispatch queue. No deposit is needed. When this call is successful, i.e. - /// the preimage has not been uploaded before and matches some imminent proposal, - /// no fee is paid. - /// - /// The dispatch origin of this call must be _Signed_. 
- /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[pallet::weight(T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32))] - pub fn note_imminent_preimage( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResultWithPostInfo { - Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; - // We check that this preimage was not uploaded before in - // `note_imminent_preimage_inner`, thus this call can only be successful once. If - // successful, user does not pay a fee. - Ok(Pays::No.into()) - } - - /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. - #[pallet::weight(( - T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), - DispatchClass::Operational, - ))] - pub fn note_imminent_preimage_operational( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResultWithPostInfo { - let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; - Self::note_imminent_preimage_inner(who, encoded_proposal)?; - // We check that this preimage was not uploaded before in - // `note_imminent_preimage_inner`, thus this call can only be successful once. If - // successful, user does not pay a fee. - Ok(Pays::No.into()) - } - - /// Remove an expired proposal preimage and collect the deposit. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `proposal_hash`: The preimage hash of a proposal. - /// - `proposal_length_upper_bound`: an upper bound on length of the proposal. Extrinsic is - /// weighted according to this value with no refund. - /// - /// This will only work after `VotingPeriod` blocks from the time that the preimage was - /// noted, if it's the same account doing it. If it's a different account, then it'll only - /// work an additional `EnactmentPeriod` later. - /// - /// Emits `PreimageReaped`. - /// - /// Weight: `O(D)` where D is length of proposal. - #[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))] - pub fn reap_preimage( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] proposal_len_upper_bound: u32, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - ensure!( - Self::pre_image_data_len(proposal_hash)? <= proposal_len_upper_bound, - Error::::WrongUpperBound, - ); - - let (provider, deposit, since, expiry) = >::get(&proposal_hash) - .and_then(|m| match m { - PreimageStatus::Available { provider, deposit, since, expiry, .. } => - Some((provider, deposit, since, expiry)), - _ => None, - }) - .ok_or(Error::::PreimageMissing)?; - - let now = >::block_number(); - let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); - let additional = if who == provider { Zero::zero() } else { enactment }; - ensure!( - now >= since.saturating_add(voting).saturating_add(additional), - Error::::TooEarly - ); - ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - - let res = - T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); - debug_assert!(res.is_ok()); - >::remove(&proposal_hash); - Self::deposit_event(Event::::PreimageReaped { - proposal_hash, - provider, - deposit, - reaper: who, - }); - Ok(()) - } - /// Unlock tokens that have an expired lock. /// /// The dispatch origin of this call must be _Signed_. @@ -1127,10 +903,7 @@ pub mod pallet { /// - `target`: The account to remove the lock on. 
/// /// Weight: `O(R)` with R number of vote of target. - #[pallet::weight( - T::WeightInfo::unlock_set(T::MaxVotes::get()) - .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) - )] + #[pallet::weight(T::WeightInfo::unlock_set(T::MaxVotes::get()).max(T::WeightInfo::unlock_remove(T::MaxVotes::get())))] pub fn unlock(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; @@ -1199,17 +972,6 @@ pub mod pallet { Ok(()) } - /// Enact a proposal from a referendum. For now we just make the weight be the maximum. - #[pallet::weight(T::BlockWeights::get().max_block)] - pub fn enact_proposal( - origin: OriginFor, - proposal_hash: T::Hash, - index: ReferendumIndex, - ) -> DispatchResult { - ensure_root(origin)?; - Self::do_enact_proposal(proposal_hash, index) - } - /// Permanently place a proposal into the blacklist. This prevents it from ever being /// proposed again. /// @@ -1225,21 +987,21 @@ pub mod pallet { /// /// Weight: `O(p)` (though as this is an high-privilege dispatch, we assume it has a /// reasonable value). - #[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] + #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor, - proposal_hash: T::Hash, + proposal_hash: H256, maybe_ref_index: Option, ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; // Insert the proposal into the blacklist. - let permanent = (T::BlockNumber::max_value(), Vec::::new()); + let permanent = (T::BlockNumber::max_value(), BoundedVec::::default()); Blacklist::::insert(&proposal_hash, permanent); // Remove the queued proposal, if it's there. PublicProps::::mutate(|props| { - if let Some(index) = props.iter().position(|p| p.1 == proposal_hash) { + if let Some(index) = props.iter().position(|p| p.1.hash() == proposal_hash) { let (prop_index, ..) = props.remove(index); if let Some((whos, amount)) = DepositOf::::take(prop_index) { for who in whos.into_iter() { @@ -1250,14 +1012,14 @@ pub mod pallet { }); // Remove the external queued referendum, if it's there. - if matches!(NextExternal::::get(), Some((h, ..)) if h == proposal_hash) { + if matches!(NextExternal::::get(), Some((p, ..)) if p.hash() == proposal_hash) { NextExternal::::kill(); } // Remove the referendum, if it's there. if let Some(ref_index) = maybe_ref_index { if let Ok(status) = Self::referendum_status(ref_index) { - if status.proposal_hash == proposal_hash { + if status.proposal.hash() == proposal_hash { Self::internal_cancel_referendum(ref_index); } } @@ -1274,7 +1036,7 @@ pub mod pallet { /// - `prop_index`: The index of the proposal to cancel. /// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` - #[pallet::weight(T::WeightInfo::cancel_proposal(T::MaxProposals::get()))] + #[pallet::weight(T::WeightInfo::cancel_proposal())] pub fn cancel_proposal( origin: OriginFor, #[pallet::compact] prop_index: PropIndex, @@ -1294,6 +1056,25 @@ pub mod pallet { } } +pub trait EncodeInto: Encode { + fn encode_into + Default>(&self) -> T { + let mut t = T::default(); + self.using_encoded(|data| { + if data.len() <= t.as_mut().len() { + t.as_mut()[0..data.len()].copy_from_slice(data); + } else { + // encoded self is too big to fit into a T. hash it and use the first bytes of that + // instead. 
+ let hash = sp_io::hashing::blake2_256(data); + let l = t.as_mut().len().min(hash.len()); + t.as_mut()[0..l].copy_from_slice(&hash[0..l]); + } + }); + t + } +} +impl EncodeInto for T {} + impl Pallet { // exposed immutables. @@ -1306,7 +1087,7 @@ impl Pallet { /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( n: T::BlockNumber, - ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); Self::maturing_referenda_at_inner(n, next..last) @@ -1315,7 +1096,7 @@ impl Pallet { fn maturing_referenda_at_inner( n: T::BlockNumber, range: core::ops::Range, - ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { range .into_iter() .map(|i| (i, Self::referendum_info(i))) @@ -1331,13 +1112,13 @@ impl Pallet { /// Start a referendum. pub fn internal_start_referendum( - proposal_hash: T::Hash, + proposal: BoundedCallOf, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { >::inject_referendum( >::block_number().saturating_add(T::VotingPeriod::get()), - proposal_hash, + proposal, threshold, delay, ) @@ -1353,8 +1134,8 @@ impl Pallet { /// Ok if the given referendum is active, Err otherwise fn ensure_ongoing( - r: ReferendumInfo>, - ) -> Result>, DispatchError> { + r: ReferendumInfo, BalanceOf>, + ) -> Result, BalanceOf>, DispatchError> { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::::ReferendumInvalid.into()), @@ -1363,7 +1144,7 @@ impl Pallet { fn referendum_status( ref_index: ReferendumIndex, - ) -> Result>, DispatchError> { + ) -> Result, BalanceOf>, DispatchError> { let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; Self::ensure_ongoing(info) } @@ -1388,11 +1169,9 @@ impl Pallet { votes[i].1 = vote; }, Err(i) => { - ensure!( - votes.len() as u32 <= T::MaxVotes::get(), - Error::::MaxVotesReached - ); - votes.insert(i, (ref_index, vote)); + votes + .try_insert(i, (ref_index, vote)) + .map_err(|_| Error::::MaxVotesReached)?; }, } Self::deposit_event(Event::::Voted { voter: who.clone(), ref_index, vote }); @@ -1606,14 +1385,14 @@ impl Pallet { /// Start a referendum fn inject_referendum( end: T::BlockNumber, - proposal_hash: T::Hash, + proposal: BoundedCallOf, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::::put(ref_index + 1); let status = - ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; + ReferendumStatus { end, proposal, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); Self::deposit_event(Event::::Started { ref_index, threshold }); @@ -1659,14 +1438,10 @@ impl Pallet { if let Some((depositors, deposit)) = >::take(prop_index) { // refund depositors - for d in &depositors { + for d in depositors.iter() { T::Currency::unreserve(d, deposit); } - Self::deposit_event(Event::::Tabled { - proposal_index: prop_index, - deposit, - depositors, - }); + Self::deposit_event(Event::::Tabled { proposal_index: prop_index, deposit }); Self::inject_referendum( now.saturating_add(T::VotingPeriod::get()), proposal, @@ -1680,71 +1455,35 @@ impl Pallet { } } - fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { - let preimage = >::take(&proposal_hash); - if let Some(PreimageStatus::Available { data, provider, deposit, .. 
}) = preimage { - if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { - let err_amount = T::Currency::unreserve(&provider, deposit); - debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::::PreimageUsed { proposal_hash, provider, deposit }); - - let res = proposal - .dispatch(frame_system::RawOrigin::Root.into()) - .map(|_| ()) - .map_err(|e| e.error); - Self::deposit_event(Event::::Executed { ref_index: index, result: res }); - - Ok(()) - } else { - T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); - Self::deposit_event(Event::::PreimageInvalid { - proposal_hash, - ref_index: index, - }); - Err(Error::::PreimageInvalid.into()) - } - } else { - Self::deposit_event(Event::::PreimageMissing { proposal_hash, ref_index: index }); - Err(Error::::PreimageMissing.into()) - } - } - fn bake_referendum( now: T::BlockNumber, index: ReferendumIndex, - status: ReferendumStatus>, + status: ReferendumStatus, BalanceOf>, ) -> bool { let total_issuance = T::Currency::total_issuance(); let approved = status.threshold.approved(status.tally, total_issuance); if approved { Self::deposit_event(Event::::Passed { ref_index: index }); - if status.delay.is_zero() { - let _ = Self::do_enact_proposal(status.proposal_hash, index); - } else { - let when = now.saturating_add(status.delay); - // Note that we need the preimage now. - Preimages::::mutate_exists( - &status.proposal_hash, - |maybe_pre| match *maybe_pre { - Some(PreimageStatus::Available { ref mut expiry, .. }) => - *expiry = Some(when), - ref mut a => *a = Some(PreimageStatus::Missing(when)), - }, - ); - - if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index).encode(), - DispatchTime::At(when), - None, - 63, - frame_system::RawOrigin::Root.into(), - Call::enact_proposal { proposal_hash: status.proposal_hash, index }.into(), - ) - .is_err() - { - frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); - } + // Actually `hold` the proposal now since we didn't hold it when it came in via the + // submit extrinsic and we now know that it will be needed. This will be reversed by + // Scheduler pallet once it is executed which assumes that we will already have placed + // a `hold` on it. + T::Preimages::hold(&status.proposal); + + // Earliest it can be scheduled for is next block. + let when = now.saturating_add(status.delay.max(One::one())); + if T::Scheduler::schedule_named( + (DEMOCRACY_ID, index).encode_into(), + DispatchTime::At(when), + None, + 63, + frame_system::RawOrigin::Root.into(), + status.proposal, + ) + .is_err() + { + frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); } } else { Self::deposit_event(Event::::NotPassed { ref_index: index }); @@ -1780,11 +1519,10 @@ impl Pallet { if Self::launch_next(now).is_ok() { weight = max_block_weight; } else { - weight = - weight.saturating_add(T::WeightInfo::on_initialize_base_with_launch_period(r)); + weight.saturating_accrue(T::WeightInfo::on_initialize_base_with_launch_period(r)); } } else { - weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); + weight.saturating_accrue(T::WeightInfo::on_initialize_base(r)); } // tally up votes for any expiring referenda. @@ -1795,8 +1533,8 @@ impl Pallet { } // Notes: - // * We don't consider the lowest unbaked to be the last maturing in case some refendum have - // longer voting period than others. + // * We don't consider the lowest unbaked to be the last maturing in case some referenda + // have a longer voting period than others. 
// * The iteration here shouldn't trigger any storage read that are not in cache, due to // `maturing_referenda_at_inner` having already read them. // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number @@ -1822,116 +1560,6 @@ impl Pallet { // `Compact`. decode_compact_u32_at(&>::hashed_key_for(proposal)) } - - /// Check that pre image exists and its value is variant `PreimageStatus::Missing`. - /// - /// This check is done without getting the complete value in the runtime to avoid copying a big - /// value in the runtime. - fn check_pre_image_is_missing(proposal_hash: T::Hash) -> DispatchResult { - // To decode the enum variant we only need the first byte. - let mut buf = [0u8; 1]; - let key = >::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::NotImminent)?; - // The value may be smaller that 1 byte. - let mut input = &buf[0..buf.len().min(bytes as usize)]; - - match input.read_byte() { - Ok(0) => Ok(()), // PreimageStatus::Missing is variant 0 - Ok(1) => Err(Error::::DuplicatePreimage.into()), - _ => { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - Err(Error::::NotImminent.into()) - }, - } - } - - /// Check that pre image exists, its value is variant `PreimageStatus::Available` and decode - /// the length of `data: Vec` fields. - /// - /// This check is done without getting the complete value in the runtime to avoid copying a big - /// value in the runtime. - /// - /// If the pre image is missing variant or doesn't exist then the error `PreimageMissing` is - /// returned. - fn pre_image_data_len(proposal_hash: T::Hash) -> Result { - // To decode the `data` field of Available variant we need: - // * one byte for the variant - // * at most 5 bytes to decode a `Compact` - let mut buf = [0u8; 6]; - let key = >::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::PreimageMissing)?; - // The value may be smaller that 6 bytes. - let mut input = &buf[0..buf.len().min(bytes as usize)]; - - match input.read_byte() { - Ok(1) => (), // Check that input exists and is second variant. - Ok(0) => return Err(Error::::PreimageMissing.into()), - _ => { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - return Err(Error::::PreimageMissing.into()) - }, - } - - // Decode the length of the vector. - let len = codec::Compact::::decode(&mut input) - .map_err(|_| { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - DispatchError::from(Error::::PreimageMissing) - })? 
- .0;
-
- Ok(len)
- }
-
- // See `note_preimage`
- fn note_preimage_inner(who: T::AccountId, encoded_proposal: Vec) -> DispatchResult {
- let proposal_hash = T::Hashing::hash(&encoded_proposal[..]);
- ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage);
-
- let deposit = >::from(encoded_proposal.len() as u32)
- .saturating_mul(T::PreimageByteDeposit::get());
- T::Currency::reserve(&who, deposit)?;
-
- let now = >::block_number();
- let a = PreimageStatus::Available {
- data: encoded_proposal,
- provider: who.clone(),
- deposit,
- since: now,
- expiry: None,
- };
- >::insert(proposal_hash, a);
-
- Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit });
-
- Ok(())
- }
-
- // See `note_imminent_preimage`
- fn note_imminent_preimage_inner(
- who: T::AccountId,
- encoded_proposal: Vec,
- ) -> DispatchResult {
- let proposal_hash = T::Hashing::hash(&encoded_proposal[..]);
- Self::check_pre_image_is_missing(proposal_hash)?;
- let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?;
- let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?;
-
- let now = >::block_number();
- let free = >::zero();
- let a = PreimageStatus::Available {
- data: encoded_proposal,
- provider: who.clone(),
- deposit: Zero::zero(),
- since: now,
- expiry: Some(expiry),
- };
- >::insert(proposal_hash, a);
-
- Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit: free });
-
- Ok(())
- }
}

/// Decode `Compact` from the trie at given key.
diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs
new file mode 100644
index 0000000000000..3ec249c1d981c
--- /dev/null
+++ b/frame/democracy/src/migrations.rs
@@ -0,0 +1,236 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Storage migrations for the democracy pallet.
+
+use super::*;
+use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, BoundedVec};
+use sp_core::H256;
+
+/// The log target.
+const TARGET: &'static str = "runtime::democracy::migration::v1";
+
+/// The original data layout of the democracy pallet without a specific version number.
+mod v0 {
+ use super::*;
+
+ #[storage_alias]
+ pub type PublicProps = StorageValue<
+ Pallet,
+ Vec<(PropIndex, ::Hash, ::AccountId)>,
+ ValueQuery,
+ >;
+
+ #[storage_alias]
+ pub type NextExternal =
+ StorageValue, (::Hash, VoteThreshold)>;
+
+ #[cfg(feature = "try-runtime")]
+ #[storage_alias]
+ pub type ReferendumInfoOf = StorageMap<
+ Pallet,
+ frame_support::Twox64Concat,
+ ReferendumIndex,
+ ReferendumInfo<
+ ::BlockNumber,
+ ::Hash,
+ BalanceOf,
+ >,
+ >;
+}
+
+pub mod v1 {
+ use super::*;
+
+ /// Migration for translating bare `Hash`es into `Bounded`s.
+ pub struct Migration(sp_std::marker::PhantomData); + + impl> OnRuntimeUpgrade for Migration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + + let props_count = v0::PublicProps::::get().len(); + log::info!(target: TARGET, "{} public proposals will be migrated.", props_count,); + ensure!(props_count <= T::MaxProposals::get() as usize, "too many proposals"); + + let referenda_count = v0::ReferendumInfoOf::::iter().count(); + log::info!(target: TARGET, "{} referenda will be migrated.", referenda_count); + + Ok((props_count as u32, referenda_count as u32).encode()) + } + + #[allow(deprecated)] + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + if StorageVersion::get::>() != 0 { + log::warn!( + target: TARGET, + "skipping on_runtime_upgrade: executed on wrong storage version.\ + Expected version 0" + ); + return weight + } + + ReferendumInfoOf::::translate( + |index, old: ReferendumInfo>| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + log::info!(target: TARGET, "migrating referendum #{:?}", &index); + Some(match old { + ReferendumInfo::Ongoing(status) => + ReferendumInfo::Ongoing(ReferendumStatus { + end: status.end, + proposal: Bounded::from_legacy_hash(status.proposal), + threshold: status.threshold, + delay: status.delay, + tally: status.tally, + }), + ReferendumInfo::Finished { approved, end } => + ReferendumInfo::Finished { approved, end }, + }) + }, + ); + + let props = v0::PublicProps::::take() + .into_iter() + .map(|(i, hash, a)| (i, Bounded::from_legacy_hash(hash), a)) + .collect::>(); + let bounded = BoundedVec::<_, T::MaxProposals>::truncate_from(props.clone()); + PublicProps::::put(bounded); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + if props.len() as u32 > T::MaxProposals::get() { + log::error!( + target: TARGET, + "truncated {} public proposals to {}; continuing", + props.len(), + T::MaxProposals::get() + ); + } + + if let Some((hash, threshold)) = v0::NextExternal::::take() { + log::info!(target: TARGET, "migrating next external proposal"); + NextExternal::::put((Bounded::from_legacy_hash(hash), threshold)); + } + + StorageVersion::new(1).put::>(); + + weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + + let (old_props_count, old_ref_count): (u32, u32) = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_props_count = crate::PublicProps::::get().len() as u32; + assert_eq!(new_props_count, old_props_count, "must migrate all public proposals"); + let new_ref_count = crate::ReferendumInfoOf::::iter().count() as u32; + assert_eq!(new_ref_count, old_ref_count, "must migrate all referenda"); + + log::info!( + target: TARGET, + "{} public proposals migrated, {} referenda migrated", + new_props_count, + new_ref_count, + ); + Ok(()) + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::{ + tests::{Test as T, *}, + types::*, + }; + use frame_support::bounded_vec; + + #[allow(deprecated)] + #[test] + fn migration_works() { + new_test_ext().execute_with(|| { + assert_eq!(StorageVersion::get::>(), 0); + // Insert some values into the v0 storage: + + // Case 1: Ongoing referendum + let hash = H256::repeat_byte(1); + let status = 
ReferendumStatus { + end: 1u32.into(), + proposal: hash.clone(), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 1u32.into(), + tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, + }; + v0::ReferendumInfoOf::::insert(1u32, ReferendumInfo::Ongoing(status)); + + // Case 2: Finished referendum + v0::ReferendumInfoOf::::insert( + 2u32, + ReferendumInfo::Finished { approved: true, end: 123u32.into() }, + ); + + // Case 3: Public proposals + let hash2 = H256::repeat_byte(2); + v0::PublicProps::::put(vec![ + (3u32, hash.clone(), 123u64), + (4u32, hash2.clone(), 123u64), + ]); + + // Case 4: Next external + v0::NextExternal::::put((hash.clone(), VoteThreshold::SuperMajorityApprove)); + + // Migrate. + let state = v1::Migration::::pre_upgrade().unwrap(); + let _weight = v1::Migration::::on_runtime_upgrade(); + v1::Migration::::post_upgrade(state).unwrap(); + // Check that all values got migrated. + + // Case 1: Ongoing referendum + assert_eq!( + ReferendumInfoOf::::get(1u32), + Some(ReferendumInfo::Ongoing(ReferendumStatus { + end: 1u32.into(), + proposal: Bounded::from_legacy_hash(hash), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 1u32.into(), + tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, + })) + ); + // Case 2: Finished referendum + assert_eq!( + ReferendumInfoOf::::get(2u32), + Some(ReferendumInfo::Finished { approved: true, end: 123u32.into() }) + ); + // Case 3: Public proposals + let props: BoundedVec<_, ::MaxProposals> = bounded_vec![ + (3u32, Bounded::from_legacy_hash(hash), 123u64), + (4u32, Bounded::from_legacy_hash(hash2), 123u64) + ]; + assert_eq!(PublicProps::::get(), props); + // Case 4: Next external + assert_eq!( + NextExternal::::get(), + Some((Bounded::from_legacy_hash(hash), VoteThreshold::SuperMajorityApprove)) + ); + }); + } +} diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 03d7216fd5aaa..eceb1a3400bba 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -19,11 +19,11 @@ use super::*; use crate as pallet_democracy; -use codec::Encode; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, SortedMembers, + ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, + SortedMembers, StorePreimage, }, weights::Weight, }; @@ -35,14 +35,12 @@ use sp_runtime::{ traits::{BadOrigin, BlakeTwo256, IdentityLookup}, Perbill, }; - mod cancellation; mod decoders; mod delegation; mod external_proposing; mod fast_tracking; mod lock_voting; -mod preimage; mod public_proposals; mod scheduling; mod voting; @@ -63,6 +61,7 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Preimage: pallet_preimage, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, } @@ -78,13 +77,11 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - Weight::from_ref_time(1_000_000).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::constants::WEIGHT_PER_SECOND.set_proof_size(u64::MAX)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; @@ -111,6 +108,16 @@ impl frame_system::Config for Test { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } + +impl pallet_preimage::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type BaseDeposit = ConstU64<0>; + type ByteDeposit = ConstU64<0>; +} + impl pallet_scheduler::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; @@ -118,11 +125,10 @@ impl pallet_scheduler::Config for Test { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = (); + type MaxScheduledPerBlock = ConstU32<100>; type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = (); - type NoPreimagePostponement = (); + type Preimages = (); } impl pallet_balances::Config for Test { @@ -158,7 +164,6 @@ impl SortedMembers for OneToFive { } impl Config for Test { - type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type Currency = pallet_balances::Pallet; type EnactmentPeriod = ConstU64<2>; @@ -167,6 +172,8 @@ impl Config for Test { type VoteLockingPeriod = ConstU64<3>; type FastTrackVotingPeriod = ConstU64<2>; type MinimumDeposit = ConstU64<1>; + type MaxDeposits = ConstU32<1000>; + type MaxBlacklisted = ConstU32<5>; type ExternalOrigin = EnsureSignedBy; type ExternalMajorityOrigin = EnsureSignedBy; type ExternalDefaultOrigin = EnsureSignedBy; @@ -176,16 +183,15 @@ impl Config for Test { type CancelProposalOrigin = EnsureRoot; type VetoOrigin = EnsureSignedBy; type CooloffPeriod = ConstU64<2>; - type PreimageByteDeposit = PreimageByteDeposit; type Slash = (); type InstantOrigin = EnsureSignedBy; type InstantAllowed = InstantAllowed; type Scheduler = Scheduler; type MaxVotes = ConstU32<100>; - type OperationalPreimageOrigin = EnsureSignedBy; type PalletsOrigin = OriginCaller; type WeightInfo = (); type MaxProposals = ConstU32<100>; + type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -203,12 +209,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Execute the function two times, with `true` and with `false`. 
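// Editor's note: the conditional-execution helper being deleted just below existed only
// for the preimage-deposit tests, which are gone. Proposals are now bound up-front via
// pallet_preimage's `StorePreimage` trait, as the rewritten `set_balance_proposal`
// further below shows. A hedged sketch of the round trip under this mock, assuming the
// small `set_balance` call is kept inline by `bound` (test name is illustrative):

#[test]
fn bound_proposal_round_trips() {
	new_test_ext().execute_with(|| {
		let bounded = set_balance_proposal(2);
		// Bounding is deterministic, so the hash alone can stand in for the proposal...
		assert_eq!(bounded.hash(), set_balance_proposal(2).hash());
		// ...and `realize` recovers the original `RuntimeCall` for dispatch or filtering.
		let (call, _len) = Preimage::realize(&bounded).unwrap();
		assert!(!<Test as frame_system::Config>::BaseCallFilter::contains(&call));
	});
}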
-pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) { - new_test_ext().execute_with(|| (execute.clone())(false)); - new_test_ext().execute_with(|| execute(true)); -} - #[test] fn params_should_work() { new_test_ext().execute_with(|| { @@ -218,44 +218,22 @@ fn params_should_work() { }); } -fn set_balance_proposal(value: u64) -> Vec { - RuntimeCall::Balances(pallet_balances::Call::set_balance { - who: 42, - new_free: value, - new_reserved: 0, - }) - .encode() +fn set_balance_proposal(value: u64) -> BoundedCallOf { + let inner = pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }; + let outer = RuntimeCall::Balances(inner); + Preimage::bound(outer).unwrap() } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = RuntimeCall::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + let call = Preimage::realize(&set_balance_proposal(i)).unwrap().0; assert!(!::BaseCallFilter::contains(&call)); } } -fn set_balance_proposal_hash(value: u64) -> H256 { - BlakeTwo256::hash(&set_balance_proposal(value)[..]) -} - -fn set_balance_proposal_hash_and_note(value: u64) -> H256 { - let p = set_balance_proposal(value); - let h = BlakeTwo256::hash(&p[..]); - match Democracy::note_preimage(RuntimeOrigin::signed(6), p) { - Ok(_) => (), - Err(x) if x == Error::::DuplicatePreimage.into() => (), - Err(x) => panic!("{:?}", x), - } - h -} - fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash(value), delay) -} - -fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash_and_note(value), delay) + Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal(value), delay) } fn next_block() { @@ -272,7 +250,7 @@ fn fast_forward_to(n: u64) { fn begin_referendum() -> ReferendumIndex { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); 0 } diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index b98e51aa3d4d1..ff046d612c026 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -24,7 +24,7 @@ fn cancel_referendum_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -42,37 +42,13 @@ fn cancel_referendum_should_work() { }); } -#[test] -fn cancel_queued_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - // start of 2 => next referendum scheduled. 
- fast_forward_to(2); - - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); - - fast_forward_to(4); - - assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); - - assert_noop!( - Democracy::cancel_queued(RuntimeOrigin::root(), 1), - Error::::ProposalMissing - ); - assert_ok!(Democracy::cancel_queued(RuntimeOrigin::root(), 0)); - assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); - }); -} - #[test] fn emergency_cancel_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 2, ); @@ -86,7 +62,7 @@ fn emergency_cancel_should_work() { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 2, ); diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 1fbb88060549b..1c8b9c3d980f9 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -18,7 +18,10 @@ //! The for various partial storage decoders use super::*; -use frame_support::storage::{migration, unhashed}; +use frame_support::{ + storage::{migration, unhashed}, + BoundedVec, +}; #[test] fn test_decode_compact_u32_at() { @@ -42,7 +45,8 @@ fn test_decode_compact_u32_at() { fn len_of_deposit_of() { new_test_ext().execute_with(|| { for l in vec![0, 1, 200, 1000] { - let value: (Vec, u64) = ((0..l).map(|_| Default::default()).collect(), 3u64); + let value: (BoundedVec, u64) = + ((0..l).map(|_| Default::default()).collect::>().try_into().unwrap(), 3u64); DepositOf::::insert(2, value); assert_eq!(Democracy::len_of_deposit_of(2), Some(l)); } @@ -51,35 +55,3 @@ fn len_of_deposit_of() { assert_eq!(Democracy::len_of_deposit_of(2), None); }) } - -#[test] -fn pre_image() { - new_test_ext().execute_with(|| { - let key = Default::default(); - let missing = PreimageStatus::Missing(0); - Preimages::::insert(key, missing); - assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); - assert_eq!(Democracy::check_pre_image_is_missing(key), Ok(())); - - Preimages::::remove(key); - assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); - assert_noop!(Democracy::check_pre_image_is_missing(key), Error::::NotImminent); - - for l in vec![0, 10, 100, 1000u32] { - let available = PreimageStatus::Available { - data: (0..l).map(|i| i as u8).collect(), - provider: 0, - deposit: 0, - since: 0, - expiry: None, - }; - - Preimages::::insert(key, available); - assert_eq!(Democracy::pre_image_data_len(key), Ok(l)); - assert_noop!( - Democracy::check_pre_image_is_missing(key), - Error::::DuplicatePreimage - ); - } - }) -} diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index 4c5ee79286055..bca7cb9524112 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -24,7 +24,7 @@ fn single_proposal_should_work_with_delegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -75,7 +75,7 @@ fn cyclic_delegation_should_unwind() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -100,7 +100,7 @@ fn 
single_proposal_should_work_with_vote_and_delegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -122,7 +122,7 @@ fn single_proposal_should_work_with_undelegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); // Delegate and undelegate vote. assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index fda555b9c3459..4cfdd2aa74a3d 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -23,35 +23,29 @@ use super::*; fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert!(>::exists()); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); // cancelled. assert!(!>::exists()); // fails - same proposal can't be resubmitted. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), Error::::ProposalBlacklisted ); fast_forward_to(1); // fails as we're still in cooloff period. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), Error::::ProposalBlacklisted ); fast_forward_to(2); // works; as we're out of the cooloff period. - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert!(>::exists()); // 3 can't veto the same thing twice. @@ -68,14 +62,11 @@ fn veto_external_works() { fast_forward_to(3); // same proposal fails as we're still in cooloff assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), Error::::ProposalBlacklisted ); // different proposal works fine. 
- assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); }); } @@ -84,22 +75,16 @@ fn external_blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - let hash = set_balance_proposal_hash(2); + let hash = set_balance_proposal(2).hash(); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); fast_forward_to(2); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( - Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - ), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), Error::::ProposalBlacklisted, ); }); @@ -110,15 +95,12 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal(2),), BadOrigin, ); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(1),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),), Error::::DuplicateProposal ); fast_forward_to(2); @@ -126,7 +108,7 @@ fn external_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -140,22 +122,19 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority( - RuntimeOrigin::signed(1), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_majority(RuntimeOrigin::signed(1), set_balance_proposal(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -169,22 +148,19 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_default( - RuntimeOrigin::signed(3), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_default(RuntimeOrigin::signed(3), set_balance_proposal(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_default( RuntimeOrigin::signed(1), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), 
threshold: VoteThreshold::SuperMajorityAgainst, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -197,11 +173,8 @@ fn external_default_referendum_works() { fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(1), - )); - assert_ok!(propose_set_balance_and_note(6, 2, 2)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),)); + assert_ok!(propose_set_balance(6, 2, 2)); fast_forward_to(2); @@ -210,17 +183,14 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash_and_note(1), + proposal: set_balance_proposal(1), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); fast_forward_to(4); @@ -229,7 +199,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(1), Ok(ReferendumStatus { end: 6, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -244,17 +214,14 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(2), Ok(ReferendumStatus { end: 8, - proposal_hash: set_balance_proposal_hash_and_note(3), + proposal: set_balance_proposal(3), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(5), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(5),)); fast_forward_to(8); @@ -263,18 +230,15 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(3), Ok(ReferendumStatus { end: 10, - proposal_hash: set_balance_proposal_hash_and_note(5), + proposal: set_balance_proposal(5), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish both - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(7), - )); - assert_ok!(propose_set_balance_and_note(6, 4, 2)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(7),)); + assert_ok!(propose_set_balance(6, 4, 2)); fast_forward_to(10); @@ -283,16 +247,16 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(4), Ok(ReferendumStatus { end: 12, - proposal_hash: set_balance_proposal_hash_and_note(4), + proposal: set_balance_proposal(4), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish public again - assert_ok!(propose_set_balance_and_note(6, 6, 2)); + assert_ok!(propose_set_balance(6, 6, 2)); // cancel external - let h = set_balance_proposal_hash_and_note(7); + let h = set_balance_proposal(7).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); fast_forward_to(12); @@ -302,7 +266,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(5), 
Ok(ReferendumStatus { end: 14, - proposal_hash: set_balance_proposal_hash_and_note(6), + proposal: set_balance_proposal(6), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 8fef985c8561c..97bb7a63908ab 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -23,14 +23,14 @@ use super::*; fn fast_track_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); @@ -38,7 +38,7 @@ fn fast_track_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 2, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -51,14 +51,14 @@ fn fast_track_referendum_works() { fn instant_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 1, 0), BadOrigin); @@ -76,7 +76,7 @@ fn instant_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 1, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -93,7 +93,7 @@ fn instant_next_block_referendum_backed() { let majority_origin_id = 3; let instant_origin_id = 6; let voting_period = 1; - let proposal_hash = set_balance_proposal_hash_and_note(2); + let proposal = set_balance_proposal(2); let delay = 2; // has no effect on test // init @@ -103,13 +103,13 @@ fn instant_next_block_referendum_backed() { // propose with majority origin assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(majority_origin_id), - proposal_hash + proposal.clone() )); // fast track with instant origin and voting period pointing to the next block assert_ok!(Democracy::fast_track( RuntimeOrigin::signed(instant_origin_id), - proposal_hash, + proposal.hash(), voting_period, delay )); @@ -119,7 +119,7 @@ fn instant_next_block_referendum_backed() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: start_block_number + voting_period, - proposal_hash, + proposal, threshold: VoteThreshold::SimpleMajority, delay, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -143,11 +143,8 @@ fn instant_next_block_referendum_backed() { fn fast_track_referendum_fails_when_no_simple_majority() { new_test_ext().execute_with(|| { 
System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2) - )); + let h = set_balance_proposal(2).hash(); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::NotSimpleMajority diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index de1137f03fd38..540198ecf33a1 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -43,7 +43,7 @@ fn lock_voting_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -59,7 +59,7 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); } - fast_forward_to(2); + fast_forward_to(3); // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); @@ -126,13 +126,13 @@ fn no_locks_without_conviction_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(0, 10))); - fast_forward_to(2); + fast_forward_to(3); assert_eq!(Balances::free_balance(42), 2); assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 1, r)); @@ -146,7 +146,7 @@ fn lock_voting_should_work_with_delegation() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -167,28 +167,16 @@ fn lock_voting_should_work_with_delegation() { fn setup_three_referenda() -> (u32, u32, u32) { System::set_block_number(0); - let r1 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r1 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r1, aye(4, 10))); - let r2 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r2 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r2, aye(3, 20))); - let r3 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r3 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r3, aye(2, 50))); fast_forward_to(2); @@ -306,7 +294,7 @@ fn locks_should_persist_from_voting_to_delegation() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SimpleMajority, 0, ); diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs deleted file mode 100644 index 39536eab8009b..0000000000000 --- a/frame/democracy/src/tests/preimage.rs +++ /dev/null @@ -1,237 +0,0 @@ -// This file is part of Substrate. 
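// Editor's note: the `preimage.rs` test module whose deletion starts here covered
// democracy's own preimage extrinsics (`note_preimage`, `reap_preimage`, ...), all
// removed by this patch. Their role moves to pallet_preimage, wired into the mock
// above. A hedged sketch of the replacement flow (deposits are zero in this mock; the
// `remark` call is an arbitrary illustrative payload, and `Encode` is re-imported
// locally since the module-level import was dropped):

#[test]
fn preimages_are_noted_via_pallet_preimage_now() {
	new_test_ext().execute_with(|| {
		use codec::Encode;
		let bytes = RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 32] }).encode();
		// Any signed origin may note a preimage; no democracy-specific deposit applies.
		assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(6), bytes));
	});
}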
- -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The preimage tests. - -use super::*; - -#[test] -fn missing_preimage_should_fail() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); -} - -#[test] -fn preimage_deposit_should_be_required_and_returned() { - new_test_ext_execute_with_cond(|operational| { - // fee of 100 is too much. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); - assert_noop!( - if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), vec![0; 500]) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), vec![0; 500]) - }, - BalancesError::::InsufficientBalance, - ); - // fee of 1 is reasonable. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::free_balance(42), 2); - }); -} - -#[test] -fn preimage_deposit_should_be_reapable_earlier_by_owner() { - new_test_ext_execute_with_cond(|operational| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - assert_noop!( - Democracy::reap_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::TooEarly - ); - next_block(); - assert_ok!(Democracy::reap_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal_hash(2), - u32::MAX - )); - - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::reserved_balance(6), 0); - }); -} - -#[test] -fn preimage_deposit_should_be_reapable() { - new_test_ext_execute_with_cond(|operational| { - assert_noop!( - Democracy::reap_preimage( - RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::PreimageMissing - ); - - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }); - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage( - 
RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::TooEarly - ); - - next_block(); - assert_ok!(Democracy::reap_preimage( - RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - )); - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 48); - assert_eq!(Balances::free_balance(5), 62); - }); -} - -#[test] -fn noting_imminent_preimage_for_free_should_work() { - new_test_ext_execute_with_cond(|operational| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - assert_noop!( - if operational { - Democracy::note_imminent_preimage_operational( - RuntimeOrigin::signed(6), - set_balance_proposal(2), - ) - } else { - Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }, - Error::::NotImminent - ); - - next_block(); - - // Now we're in the dispatch queue it's all good. - assert_ok!(Democracy::note_imminent_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal(2) - )); - - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); -} - -#[test] -fn reaping_imminent_preimage_should_fail() { - new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash_and_note(2); - let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage(RuntimeOrigin::signed(6), h, u32::MAX), - Error::::Imminent - ); - }); -} - -#[test] -fn note_imminent_preimage_can_only_be_successful_once() { - new_test_ext().execute_with(|| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - next_block(); - - // First time works - assert_ok!(Democracy::note_imminent_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal(2) - )); - - // Second time fails - assert_noop!( - Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)), - Error::::DuplicatePreimage - ); - - // Fails from any user - assert_noop!( - Democracy::note_imminent_preimage(RuntimeOrigin::signed(5), set_balance_proposal(2)), - Error::::DuplicatePreimage - ); - }); -} diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index c52533e46ccc5..f48824dc95c5d 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -22,9 +22,9 @@ use super::*; #[test] fn backing_for_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); assert_eq!(Democracy::backing_for(0), Some(2)); assert_eq!(Democracy::backing_for(1), Some(4)); assert_eq!(Democracy::backing_for(2), Some(3)); @@ -34,11 +34,11 @@ fn backing_for_should_work() { #[test] fn deposit_for_proposals_should_be_taken() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - 
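// Editor's note: from here on the updated assertions drop `second`'s old
// `seconds_upper_bound` argument. `DepositOf` is now a `BoundedVec` capped by
// `MaxDeposits`, so seconding no longer needs a caller-supplied bound for weighing,
// which is also why `invalid_seconds_upper_bound_should_not_work` disappears below.
// New call shape, exactly as in the `+` lines that follow:
//
//     assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0));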
assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); assert_eq!(Balances::free_balance(5), 35); @@ -48,11 +48,11 @@ fn deposit_for_proposals_should_be_taken() { #[test] fn deposit_for_proposals_should_be_returned() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); fast_forward_to(3); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 20); @@ -77,30 +77,19 @@ fn poor_proposer_should_not_work() { #[test] fn poor_seconder_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(2, 2, 11)); + assert_ok!(propose_set_balance(2, 2, 11)); assert_noop!( - Democracy::second(RuntimeOrigin::signed(1), 0, u32::MAX), + Democracy::second(RuntimeOrigin::signed(1), 0), BalancesError::::InsufficientBalance ); }); } -#[test] -fn invalid_seconds_upper_bound_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_noop!( - Democracy::second(RuntimeOrigin::signed(2), 0, 0), - Error::::WrongUpperBound - ); - }); -} - #[test] fn cancel_proposal_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); assert_noop!(Democracy::cancel_proposal(RuntimeOrigin::signed(1), 0), BadOrigin); assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); System::assert_last_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); @@ -113,10 +102,10 @@ fn cancel_proposal_should_work() { fn blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - let hash = set_balance_proposal_hash(2); + let hash = set_balance_proposal(2).hash(); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); assert_noop!(Democracy::blacklist(RuntimeOrigin::signed(1), hash, None), BadOrigin); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); @@ -124,11 +113,11 @@ fn blacklisting_should_work() { assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); - assert_noop!(propose_set_balance_and_note(1, 2, 2), 
Error::::ProposalBlacklisted); + assert_noop!(propose_set_balance(1, 2, 2), Error::::ProposalBlacklisted); fast_forward_to(2); - let hash = set_balance_proposal_hash(4); + let hash = set_balance_proposal(4).hash(); assert_ok!(Democracy::referendum_status(0)); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, Some(0))); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); @@ -139,9 +128,9 @@ fn blacklisting_should_work() { fn runners_up_should_come_after() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); fast_forward_to(2); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); fast_forward_to(4); diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index 4b5fe8dd9c1c3..5e133f38945d6 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -24,7 +24,7 @@ fn simple_passing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -43,7 +43,7 @@ fn simple_failing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -62,13 +62,13 @@ fn ooo_inject_referendums_should_work() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal_hash_and_note(3), + set_balance_proposal(3), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -77,11 +77,13 @@ fn ooo_inject_referendums_should_work() { assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); next_block(); - assert_eq!(Balances::free_balance(42), 2); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + next_block(); assert_eq!(Balances::free_balance(42), 3); }); @@ -92,7 +94,7 @@ fn delayed_enactment_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 1, ); @@ -118,19 +120,19 @@ fn lowest_unbaked_should_be_sensible() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal_hash_and_note(1), + set_balance_proposal(1), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); let r3 = Democracy::inject_referendum( 10, - set_balance_proposal_hash_and_note(3), + set_balance_proposal(3), VoteThreshold::SuperMajorityApprove, 0, ); @@ -141,16 +143,19 @@ fn lowest_unbaked_should_be_sensible() { assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); - - // r2 is approved - assert_eq!(Balances::free_balance(42), 2); + // r2 ends with approval assert_eq!(Democracy::lowest_unbaked(), 0); 
next_block(); - - // r1 is approved - assert_eq!(Balances::free_balance(42), 1); + // r1 ends with approval assert_eq!(Democracy::lowest_unbaked(), 3); assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + + // r2 is executed + assert_eq!(Balances::free_balance(42), 2); + + next_block(); + // r1 is executed + assert_eq!(Balances::free_balance(42), 1); }); } diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 59d0cd6bc50ef..482cd430e0e7f 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -63,7 +63,7 @@ fn split_vote_cancellation_should_work() { fn single_proposal_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); let r = 0; assert!(Democracy::referendum_info(r).is_none()); @@ -76,7 +76,7 @@ fn single_proposal_should_work() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 1, nays: 0, turnout: 10 }, @@ -106,7 +106,7 @@ fn controversial_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -132,7 +132,7 @@ fn controversial_low_turnout_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -156,7 +156,7 @@ fn passing_low_turnout_voting_should_work() { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 52ab8a40eb3e3..4b7f1a0fac45c 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -18,7 +18,7 @@ //! Miscellaneous additional datatypes. use crate::{AccountVote, Conviction, Vote, VoteThreshold}; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero}, @@ -26,7 +26,7 @@ use sp_runtime::{ }; /// Info regarding an ongoing referendum. -#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, MaxEncodedLen, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Tally { /// The number of aye votes, expressed in terms of post-conviction lock-vote. pub ayes: Balance, @@ -37,7 +37,9 @@ pub struct Tally { } /// Amount of votes and capital placed in delegation for an account. -#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive( + Encode, MaxEncodedLen, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, +)] pub struct Delegations { /// The number of votes (this is post-conviction). pub votes: Balance, @@ -160,12 +162,12 @@ impl< } /// Info regarding an ongoing referendum. 
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct ReferendumStatus { +#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct ReferendumStatus { /// When voting on this referendum will end. pub end: BlockNumber, - /// The hash of the proposal being voted on. - pub proposal_hash: Hash, + /// The proposal being voted on. + pub proposal: Proposal, /// The thresholding mechanism to determine whether it passed. pub threshold: VoteThreshold, /// The delay (in blocks) to wait after a successful referendum before deploying. @@ -175,23 +177,23 @@ pub struct ReferendumStatus { } /// Info regarding a referendum, present or past. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub enum ReferendumInfo { +#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub enum ReferendumInfo { /// Referendum is happening, the arg is the block number at which it will end. - Ongoing(ReferendumStatus), + Ongoing(ReferendumStatus), /// Referendum finished at `end`, and has been `approved` or rejected. Finished { approved: bool, end: BlockNumber }, } -impl ReferendumInfo { +impl ReferendumInfo { /// Create a new instance. pub fn new( end: BlockNumber, - proposal_hash: Hash, + proposal: Proposal, threshold: VoteThreshold, delay: BlockNumber, ) -> Self { - let s = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Tally::default() }; + let s = ReferendumStatus { end, proposal, threshold, delay, tally: Tally::default() }; ReferendumInfo::Ongoing(s) } } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index c74623d4dfeb8..122f54febd8cf 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -18,11 +18,12 @@ //! The vote datatype. use crate::{Conviction, Delegations, ReferendumIndex}; -use codec::{Decode, Encode, EncodeLike, Input, Output}; +use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen, Output}; +use frame_support::traits::Get; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, - RuntimeDebug, + BoundedVec, RuntimeDebug, }; use sp_std::prelude::*; @@ -39,6 +40,12 @@ impl Encode for Vote { } } +impl MaxEncodedLen for Vote { + fn max_encoded_len() -> usize { + 1 + } +} + impl EncodeLike for Vote {} impl Decode for Vote { @@ -66,7 +73,7 @@ impl TypeInfo for Vote { } /// A vote for a referendum of a particular account. -#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +#[derive(Encode, MaxEncodedLen, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum AccountVote { /// A standard vote, one-way (approve or reject) with a given amount of conviction. Standard { vote: Vote, balance: Balance }, @@ -107,7 +114,18 @@ impl AccountVote { /// A "prior" lock, i.e. a lock for some now-forgotten reason. #[derive( - Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, + Encode, + MaxEncodedLen, + Decode, + Default, + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + TypeInfo, )] pub struct PriorLock(BlockNumber, Balance); @@ -131,13 +149,15 @@ impl PriorLock { +#[derive(Clone, Encode, Decode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)] +#[codec(mel_bound(skip_type_params(MaxVotes)))] +#[scale_info(skip_type_params(MaxVotes))] +pub enum Voting> { /// The account is voting directly. 
`delegations` is the total amount of post-conviction voting /// weight that it controls from those that have delegated to it. Direct { /// The current votes of the account. - votes: Vec<(ReferendumIndex, AccountVote)>, + votes: BoundedVec<(ReferendumIndex, AccountVote), MaxVotes>, /// The total amount of delegations that this account has received. delegations: Delegations, /// Any pre-existing locks from past voting/delegating activity. @@ -155,20 +175,24 @@ pub enum Voting { }, } -impl Default - for Voting +impl> Default + for Voting { fn default() -> Self { Voting::Direct { - votes: Vec::new(), + votes: Default::default(), delegations: Default::default(), prior: PriorLock(Zero::zero(), Default::default()), } } } -impl - Voting +impl< + Balance: Saturating + Ord + Zero + Copy, + BlockNumber: Ord + Copy + Zero, + AccountId, + MaxVotes: Get, + > Voting { pub fn rejig(&mut self, now: BlockNumber) { match self { diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index 443d6b1166198..e8ef91def9820 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -18,7 +18,7 @@ //! Voting thresholds. use crate::Tally; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -26,7 +26,9 @@ use sp_runtime::traits::{IntegerSquareRoot, Zero}; use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, TypeInfo)] +#[derive( + Clone, Copy, PartialEq, Eq, Encode, MaxEncodedLen, Decode, sp_runtime::RuntimeDebug, TypeInfo, +)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum VoteThreshold { /// A supermajority of approvals is needed to pass this vote. diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 272921ed3a15d..0a3b717938022 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_democracy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_democracy +// --chain=dev // --output=./frame/democracy/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -45,27 +47,23 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_democracy. 
pub trait WeightInfo { fn propose() -> Weight; - fn second(s: u32, ) -> Weight; - fn vote_new(r: u32, ) -> Weight; - fn vote_existing(r: u32, ) -> Weight; + fn second() -> Weight; + fn vote_new() -> Weight; + fn vote_existing() -> Weight; fn emergency_cancel() -> Weight; - fn blacklist(p: u32, ) -> Weight; - fn external_propose(v: u32, ) -> Weight; + fn blacklist() -> Weight; + fn external_propose() -> Weight; fn external_propose_majority() -> Weight; fn external_propose_default() -> Weight; fn fast_track() -> Weight; - fn veto_external(v: u32, ) -> Weight; - fn cancel_proposal(p: u32, ) -> Weight; + fn veto_external() -> Weight; + fn cancel_proposal() -> Weight; fn cancel_referendum() -> Weight; - fn cancel_queued(r: u32, ) -> Weight; fn on_initialize_base(r: u32, ) -> Weight; fn on_initialize_base_with_launch_period(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; fn clear_public_proposals() -> Weight; - fn note_preimage(b: u32, ) -> Weight; - fn note_imminent_preimage(b: u32, ) -> Weight; - fn reap_preimage(b: u32, ) -> Weight; fn unlock_remove(r: u32, ) -> Weight; fn unlock_set(r: u32, ) -> Weight; fn remove_vote(r: u32, ) -> Weight; @@ -80,125 +78,103 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as u64) + Weight::from_ref_time(57_410_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + fn second() -> Weight { + Weight::from_ref_time(49_224_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + fn vote_new() -> Weight { + Weight::from_ref_time(60_933_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + fn vote_existing() -> Weight { + Weight::from_ref_time(60_393_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as u64) + Weight::from_ref_time(24_588_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - // 
Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + fn blacklist() -> Weight { + Weight::from_ref_time(91_226_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + fn external_propose() -> Weight { + Weight::from_ref_time(18_898_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as u64) + Weight::from_ref_time(5_136_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as u64) + Weight::from_ref_time(5_243_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as u64) + Weight::from_ref_time(24_275_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + fn veto_external() -> Weight { + Weight::from_ref_time(30_988_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + fn cancel_proposal() -> Weight { + Weight::from_ref_time(78_515_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as u64) + Weight::from_ref_time(16_155_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. 
fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(3_428_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(7_007_000 as u64) + // Standard Error: 2_686 + .saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -208,33 +184,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy LastTabledWasExternal (r:1 w:0) // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(7_867_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(9_528_000 as u64) + // Standard Error: 2_521 + .saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:3 w:3) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_902_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(46_787_000 as u64) + // Standard Error: 2_943 + .saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy VotingOf (r:2 w:2) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. 
fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(21_272_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(29_789_000 as u64) + // Standard Error: 2_324 + .saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -242,69 +221,48 @@ impl WeightInfo for SubstrateWeight { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(4_913_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(27_986_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_imminent_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(20_058_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - // Storage: System Account (r:1 w:0) - fn reap_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(28_619_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(6_519_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(26_619_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(28_884_000 as u64) + // Standard Error: 2_631 + .saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(25_373_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(33_498_000 as u64) + // Standard Error: 622 + .saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. 
fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_201_000 as u64) + // Standard Error: 1_007 + .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_455_000 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -317,125 +275,103 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as u64) + Weight::from_ref_time(57_410_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + fn second() -> Weight { + Weight::from_ref_time(49_224_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + fn vote_new() -> Weight { + Weight::from_ref_time(60_933_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + fn vote_existing() -> Weight { + Weight::from_ref_time(60_393_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as u64) + Weight::from_ref_time(24_588_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - // Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as 
u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + fn blacklist() -> Weight { + Weight::from_ref_time(91_226_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + fn external_propose() -> Weight { + Weight::from_ref_time(18_898_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as u64) + Weight::from_ref_time(5_136_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as u64) + Weight::from_ref_time(5_243_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as u64) + Weight::from_ref_time(24_275_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + fn veto_external() -> Weight { + Weight::from_ref_time(30_988_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + fn cancel_proposal() -> Weight { + Weight::from_ref_time(78_515_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as u64) + Weight::from_ref_time(16_155_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. 
fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(3_428_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(7_007_000 as u64) + // Standard Error: 2_686 + .saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -445,33 +381,36 @@ impl WeightInfo for () { // Storage: Democracy LastTabledWasExternal (r:1 w:0) // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(7_867_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(9_528_000 as u64) + // Standard Error: 2_521 + .saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:3 w:3) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_902_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(46_787_000 as u64) + // Standard Error: 2_943 + .saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy VotingOf (r:2 w:2) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. 
fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(21_272_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(29_789_000 as u64) + // Standard Error: 2_324 + .saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) @@ -479,69 +418,48 @@ impl WeightInfo for () { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(4_913_000 as u64) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(27_986_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_imminent_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(20_058_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - // Storage: System Account (r:1 w:0) - fn reap_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(28_619_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + Weight::from_ref_time(6_519_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(26_619_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(28_884_000 as u64) + // Standard Error: 2_631 + .saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(25_373_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(33_498_000 as u64) + // Standard Error: 622 + .saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. 
fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_201_000 as u64) + // Standard Error: 1_007 + .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_455_000 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 747d1e9f27106..20628da50937a 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -268,7 +268,7 @@ fn registration_should_work() { let mut three_fields = ten(); three_fields.additional.try_push(Default::default()).unwrap(); three_fields.additional.try_push(Default::default()).unwrap(); - assert_eq!(three_fields.additional.try_push(Default::default()), Err(())); + assert!(three_fields.additional.try_push(Default::default()).is_err()); assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_eq!(Identity::identity(10).unwrap().info, ten()); assert_eq!(Balances::free_balance(10), 90); diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 6c8b5fbaa7362..bfd0870d30c22 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -22,6 +22,9 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +# third party +log = { version = "0.4.17", default-features = false } + [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index c0b0097b07236..d949414e31cb3 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -31,7 +31,7 @@ const SEED: u32 = 0; fn setup_multi( s: u32, z: u32, -) -> Result<(Vec, OpaqueCall), &'static str> { +) -> Result<(Vec, Box<::RuntimeCall>), &'static str> { let mut signatories: Vec = Vec::new(); for i in 0..s { let signatory = account("signatory", i, SEED); @@ -44,8 +44,7 @@ fn setup_multi( // Must first convert to runtime call type. let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); - let call_data = OpaqueCall::::from_encoded(call.encode()); - Ok((signatories, call_data)) + Ok((signatories, Box::new(call))) } benchmarks! { @@ -74,35 +73,15 @@ benchmarks! { // Transaction Length let z in 0 .. 
10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, Weight::zero()) - verify { - assert!(Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(!Calls::::contains_key(call_hash)); - } - - as_multi_create_store { - // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, Weight::zero()) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(Calls::::contains_key(call_hash)); } as_multi_approve { @@ -111,49 +90,22 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // before the call, get the timepoint - let timepoint = Multisig::::timepoint(); - // Create the multi, storing for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; - assert!(Calls::::contains_key(call_hash)); - let caller2 = signatories2.remove(0); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::zero()) - verify { - let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; - assert_eq!(multisig.approvals.len(), 2); - } - - as_multi_approve_store { - // Signatories, need at least 3 people (so we don't complete the multisig) - let s in 3 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 
10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi, not storing - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, Weight::zero())?; - assert!(!Calls::::contains_key(call_hash)); + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); - assert!(Calls::::contains_key(call_hash)); } as_multi_complete { @@ -162,27 +114,27 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi, storing it for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::MAX) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::MAX) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); } @@ -195,7 +147,7 @@ benchmarks! 
{ let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); @@ -214,7 +166,7 @@ benchmarks! { let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi @@ -224,7 +176,6 @@ benchmarks! { signatories, None, call, - false, Weight::zero() )?; let caller2 = signatories2.remove(0); @@ -237,45 +188,6 @@ benchmarks! { assert_eq!(multisig.approvals.len(), 2); } - approve_as_multi_complete { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length, not a component - let z = 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let call_hash = blake2_256(call.encoded()); - // before the call, get the timepoint - let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; - // Everyone except the first person approves - for i in 1 .. s - 1 { - let mut signatories_loop = signatories2.clone(); - let caller_loop = signatories_loop.remove(i as usize); - let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; - } - let caller2 = signatories2.remove(0); - assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi( - RawOrigin::Signed(caller2), - s as u16, - signatories2, - Some(timepoint), - call_hash, - Weight::MAX - ) - verify { - assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); - } - cancel_as_multi { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; @@ -284,20 +196,18 @@ benchmarks! 
{ let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, Weight::zero())?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); - assert!(Calls::::contains_key(call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(!Calls::::contains_key(call_hash)); } impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 3bdb47ffc4568..e3031cc830209 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -47,6 +47,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migrations; mod tests; pub mod weights; @@ -57,7 +58,7 @@ use frame_support::{ PostDispatchInfo, }, ensure, - traits::{Currency, Get, ReservableCurrency, WrapperKeepOpaque}, + traits::{Currency, Get, ReservableCurrency}, weights::Weight, RuntimeDebug, }; @@ -73,6 +74,20 @@ pub use weights::WeightInfo; pub use pallet::*; +/// The log target of this pallet. +pub const LOG_TARGET: &'static str = "runtime::multisig"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] ✍️ ", $patter), >::block_number() $(, $values)* + ) + }; +} + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -100,12 +115,10 @@ pub struct Multisig { approvals: Vec, } -type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; - type CallHash = [u8; 32]; enum CallOrHash { - Call(OpaqueCall, bool), + Call(::RuntimeCall), Hash([u8; 32]), } @@ -152,9 +165,13 @@ pub mod pallet { type WeightInfo: WeightInfo; } + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); /// The set of open multisig operations. @@ -168,10 +185,6 @@ pub mod pallet { Multisig, T::AccountId>, >; - #[pallet::storage] - pub type Calls = - StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; - #[pallet::error] pub enum Error { /// Threshold must be 2 or greater. @@ -343,13 +356,13 @@ pub mod pallet { /// taken for its lifetime of `DepositBase + threshold * DepositFactor`. 
/// ------------------------------- /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) - /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) + /// - Reads: Multisig Storage, [Caller Account] + /// - Writes: Multisig Storage, [Caller Account] /// - Plus Call Weight /// # #[pallet::weight({ let s = other_signatories.len() as u32; - let z = call.encoded_len() as u32; + let z = call.using_encoded(|d| d.len()) as u32; T::WeightInfo::as_multi_create(s, z) .max(T::WeightInfo::as_multi_create_store(s, z)) @@ -362,8 +375,7 @@ pub mod pallet { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: OpaqueCall, - store_call: bool, + call: Box<::RuntimeCall>, max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -372,7 +384,7 @@ pub mod pallet { threshold, other_signatories, maybe_timepoint, - CallOrHash::Call(call, store_call), + CallOrHash::Call(*call), max_weight, ) } @@ -462,8 +474,8 @@ pub mod pallet { /// - Storage: removes one item. /// ---------------------------------- /// - DB Weight: - /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls - /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls + /// - Read: Multisig Storage, [Caller Account], Refund Account + /// - Write: Multisig Storage, [Caller Account], Refund Account /// # #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] pub fn cancel_as_multi( @@ -489,7 +501,6 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&m.depositor, m.deposit); debug_assert!(err_amount.is_zero()); >::remove(&id, &call_hash); - Self::clear_call(&call_hash); Self::deposit_event(Event::MultisigCancelled { cancelling: who, @@ -531,13 +542,12 @@ impl Pallet { let id = Self::multi_account_id(&signatories, threshold); // Threshold > 1; this means it's a multi-step operation. We extract the `call_hash`. - let (call_hash, call_len, maybe_call, store) = match call_or_hash { - CallOrHash::Call(call, should_store) => { - let call_hash = blake2_256(call.encoded()); - let call_len = call.encoded_len(); - (call_hash, call_len, Some(call), should_store) + let (call_hash, call_len, maybe_call) = match call_or_hash { + CallOrHash::Call(call) => { + let (call_hash, call_len) = call.using_encoded(|d| (blake2_256(d), d.len())); + (call_hash, call_len, Some(call)) }, - CallOrHash::Hash(h) => (h, 0, None, false), + CallOrHash::Hash(h) => (h, 0, None), }; // Branch on whether the operation has already started or not. @@ -556,13 +566,7 @@ impl Pallet { } // We only bother fetching/decoding call if we know that we're ready to execute. - let maybe_approved_call = if approvals >= threshold { - Self::get_call(&call_hash, maybe_call.as_ref()) - } else { - None - }; - - if let Some((call, call_len)) = maybe_approved_call { + if let Some(call) = maybe_call.filter(|_| approvals >= threshold) { // verify weight ensure!( call.get_dispatch_info().weight.all_lte(max_weight), @@ -572,7 +576,6 @@ impl Pallet { // Clean up storage before executing call to avoid an possibility of reentrancy // attack. >::remove(&id, call_hash); - Self::clear_call(&call_hash); T::Currency::unreserve(&m.depositor, m.deposit); let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); @@ -596,19 +599,6 @@ impl Pallet { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. - // Store the call if desired. 
- let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve( - who.clone(), - &call_hash, - data, - BalanceOf::::zero(), - )?; - true - } else { - false - }; - if let Some(pos) = maybe_pos { // Record approval. m.approvals.insert(pos, who.clone()); @@ -622,17 +612,11 @@ impl Pallet { } else { // If we already approved and didn't store the Call, then this was useless and // we report an error. - ensure!(stored, Error::::AlreadyApproved); + Err(Error::::AlreadyApproved)? } - let final_weight = if stored { - T::WeightInfo::as_multi_approve_store( - other_signatories_len as u32, - call_len as u32, - ) - } else { - T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32) - }; + let final_weight = + T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32); // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } @@ -643,14 +627,7 @@ impl Pallet { // Just start the operation by recording it in storage. let deposit = T::DepositBase::get() + T::DepositFactor::get() * threshold.into(); - // Store the call if desired. - let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve(who.clone(), &call_hash, data, deposit)?; - true - } else { - T::Currency::reserve(&who, deposit)?; - false - }; + T::Currency::reserve(&who, deposit)?; >::insert( &id, @@ -664,58 +641,13 @@ impl Pallet { ); Self::deposit_event(Event::NewMultisig { approving: who, multisig: id, call_hash }); - let final_weight = if stored { - T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) - } else { - T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32) - }; + let final_weight = + T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32); // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } } - /// Place a call's encoded data in storage, reserving funds as appropriate. - /// - /// We store `data` here because storing `call` would result in needing another `.encode`. - /// - /// Returns a `bool` indicating whether the data did end up being stored. - fn store_call_and_reserve( - who: T::AccountId, - hash: &[u8; 32], - data: OpaqueCall, - other_deposit: BalanceOf, - ) -> DispatchResult { - ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); - let deposit = other_deposit + - T::DepositBase::get() + - T::DepositFactor::get() * - BalanceOf::::from(((data.encoded_len() + 31) / 32) as u32); - T::Currency::reserve(&who, deposit)?; - Calls::::insert(&hash, (data, who, deposit)); - Ok(()) - } - - /// Attempt to decode and return the call, provided by the user or from storage. - fn get_call( - hash: &[u8; 32], - maybe_known: Option<&OpaqueCall>, - ) -> Option<(::RuntimeCall, usize)> { - maybe_known.map_or_else( - || { - Calls::::get(hash) - .and_then(|(data, ..)| Some((data.try_decode()?, data.encoded_len()))) - }, - |data| Some((data.try_decode()?, data.encoded_len())), - ) - } - - /// Attempt to remove a call from storage, returning any deposit on it to the owner. - fn clear_call(hash: &[u8; 32]) { - if let Some((_, who, deposit)) = Calls::::take(hash) { - T::Currency::unreserve(&who, deposit); - } - } - /// The current `Timepoint`. 
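	///
	/// A `Timepoint` pairs the block number at which the operation was opened
	/// with the index of the opening extrinsic within that block, making it a
	/// unique identifier for later approvals to reference.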
pub fn timepoint() -> Timepoint { Timepoint { diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs new file mode 100644 index 0000000000000..5085297cde433 --- /dev/null +++ b/frame/multisig/src/migrations.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Migrations for Multisig Pallet + +use super::*; +use frame_support::{ + dispatch::GetStorageVersion, + traits::{OnRuntimeUpgrade, WrapperKeepOpaque}, + Identity, +}; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; + +pub mod v1 { + use super::*; + + type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; + + #[frame_support::storage_alias] + type Calls = StorageMap< + Pallet, + Identity, + [u8; 32], + (OpaqueCall, ::AccountId, BalanceOf), + >; + + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let onchain = Pallet::::on_chain_storage_version(); + + ensure!(onchain < 1, "this migration can be deleted"); + + log!(info, "Number of calls to refund and delete: {}", Calls::::iter().count()); + + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + if onchain > 0 { + log!(info, "MigrateToV1 should be removed"); + return T::DbWeight::get().reads(1) + } + + Calls::::drain().for_each(|(_call_hash, (_data, caller, deposit))| { + T::Currency::unreserve(&caller, deposit); + }); + + current.put::>(); + + ::BlockWeights::get().max_block + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + let onchain = Pallet::::on_chain_storage_version(); + ensure!(onchain < 2, "this migration needs to be removed"); + ensure!(onchain == 1, "this migration needs to be run"); + ensure!( + Calls::::iter().count() == 0, + "there are some dangling calls that need to be destroyed and refunded" + ); + Ok(()) + } + } +} diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index b24a06f454368..f753b6f386c56 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -34,7 +34,6 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; -type OpaqueCall = super::OpaqueCall; frame_support::construct_runtime!( pub enum Test where @@ -130,8 +129,8 @@ fn now() -> Timepoint { Multisig::timepoint() } -fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> Box { + Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest, value })) } #[test] @@ -144,14 +143,12 @@ fn multisig_deposit_is_taken_and_returned() { let call = call_transfer(6, 15); let call_weight = 
call.get_dispatch_info().weight; - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(1), 2); @@ -162,8 +159,7 @@ fn multisig_deposit_is_taken_and_returned() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(1), 5); @@ -171,96 +167,6 @@ fn multisig_deposit_is_taken_and_returned() { }); } -#[test] -fn multisig_deposit_is_taken_and_returned_with_call_storage() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 2, - vec![2, 3], - None, - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 5); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 2, - vec![1, 3], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 0); - }); -} - -#[test] -fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - hash, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 1); - assert_eq!(Balances::reserved_balance(1), 4); - - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(2), 3); - assert_eq!(Balances::reserved_balance(2), 2); - assert_eq!(Balances::free_balance(1), 1); - assert_eq!(Balances::reserved_balance(1), 4); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(3), - 3, - vec![1, 2], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(2), 5); - assert_eq!(Balances::reserved_balance(2), 0); - }); -} - #[test] fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { @@ -298,8 +204,8 @@ fn timepoint_checking_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); + let call = call_transfer(6, 15); + let hash = blake2_256(&call.encode()); assert_noop!( Multisig::approve_as_multi( @@ -328,8 +234,7 @@ fn timepoint_checking_works() { 2, vec![1, 3], None, - 
OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::NoTimepoint, @@ -341,8 +246,7 @@ fn timepoint_checking_works() { 2, vec![1, 3], Some(later), - OpaqueCall::from_encoded(call), - false, + call, Weight::zero() ), Error::::WrongTimepoint, @@ -350,41 +254,6 @@ fn timepoint_checking_works() { }); } -#[test] -fn multisig_2_of_3_works_with_call_storing() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 2, - vec![2, 3], - None, - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 2, - vec![1, 3], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(6), 15); - }); -} - #[test] fn multisig_2_of_3_works() { new_test_ext().execute_with(|| { @@ -395,8 +264,7 @@ fn multisig_2_of_3_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 2, @@ -412,8 +280,7 @@ fn multisig_2_of_3_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -430,8 +297,7 @@ fn multisig_3_of_3_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 3, @@ -455,8 +321,7 @@ fn multisig_3_of_3_works() { 3, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -492,68 +357,6 @@ fn cancel_multisig_works() { }); } -#[test] -fn cancel_multisig_with_call_storage_works() { - new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - OpaqueCall::from_encoded(call), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 4); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), - hash, - Weight::zero() - )); - assert_noop!( - Multisig::cancel_as_multi(RuntimeOrigin::signed(2), 3, vec![1, 3], now(), hash), - Error::::NotOwner, - ); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash),); - assert_eq!(Balances::free_balance(1), 10); - }); -} - -#[test] -fn cancel_multisig_with_alt_call_storage_works() { - new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - hash, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 6); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), 
- OpaqueCall::from_encoded(call), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(2), 8); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash)); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - }); -} - #[test] fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { @@ -564,14 +367,12 @@ fn multisig_2_of_3_as_multi_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -581,8 +382,7 @@ fn multisig_2_of_3_as_multi_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -599,18 +399,15 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; - let data1 = call1.encode(); let call2 = call_transfer(7, 5); let call2_weight = call2.get_dispatch_info().weight; - let data2 = call2.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data1.clone()), - false, + call1.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -618,8 +415,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 3], None, - OpaqueCall::from_encoded(data2.clone()), - false, + call2.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -627,8 +423,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data1), - false, + call1, call1_weight )); assert_ok!(Multisig::as_multi( @@ -636,8 +431,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data2), - false, + call2, call2_weight )); @@ -656,15 +450,13 @@ fn multisig_2_of_3_cannot_reissue_same_call() { let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -672,8 +464,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), call_weight )); assert_eq!(Balances::free_balance(multi), 5); @@ -683,8 +474,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -692,8 +482,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call.clone(), call_weight )); @@ -714,15 +503,14 @@ fn multisig_2_of_3_cannot_reissue_same_call() { #[test] fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); + let call = call_transfer(6, 15); assert_noop!( Multisig::as_multi( RuntimeOrigin::signed(1), 0, vec![2], None, - OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, @@ -733,8 +521,7 @@ fn 
minimum_threshold_check_works() { 1, vec![2], None, - OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, @@ -745,15 +532,14 @@ fn minimum_threshold_check_works() { #[test] fn too_many_signatories_fails() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); + let call = call_transfer(6, 15); assert_noop!( Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3, 4], None, - OpaqueCall::from_encoded(call), - false, + call.clone(), Weight::zero() ), Error::::TooManySignatories, @@ -815,8 +601,8 @@ fn multisig_1_of_3_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); + let call = call_transfer(6, 15); + let hash = blake2_256(&call.encode()); assert_noop!( Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -834,17 +620,15 @@ fn multisig_1_of_3_works() { 1, vec![2, 3], None, - OpaqueCall::from_encoded(call), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, ); - let boxed_call = Box::new(call_transfer(6, 15)); assert_ok!(Multisig::as_multi_threshold_1( RuntimeOrigin::signed(1), vec![2, 3], - boxed_call + call_transfer(6, 15) )); assert_eq!(Balances::free_balance(6), 15); @@ -871,14 +655,12 @@ fn weight_check_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -889,8 +671,7 @@ fn weight_check_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, Weight::zero() ), Error::::MaxWeightTooLow, @@ -911,8 +692,7 @@ fn multisig_handles_no_preimage_after_all_approve() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 3, @@ -944,8 +724,7 @@ fn multisig_handles_no_preimage_after_all_approve() { 3, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 953cf39cd12db..b3238630d3174 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -140,7 +140,7 @@ pub mod pallet { let sender = ensure_signed(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|()| Error::::TooLong)?; + name.try_into().map_err(|_| Error::::TooLong)?; ensure!(bounded_name.len() >= T::MinLength::get() as usize, Error::::TooShort); let deposit = if let Some((_, deposit)) = >::get(&sender) { @@ -229,7 +229,7 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|()| Error::::TooLong)?; + name.try_into().map_err(|_| Error::::TooLong)?; let target = T::Lookup::lookup(target)?; let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); >::insert(&target, (bounded_name, deposit)); diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 9a5cc186cca64..77046f4fb58b6 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -19,6 +19,7 @@ sp-core = { version = 
"6.0.0", default-features = false, optional = true, path = sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -36,10 +37,13 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", +] diff --git a/frame/preimage/src/benchmarking.rs b/frame/preimage/src/benchmarking.rs index 3c7be9db573f4..8a61d7d780bfd 100644 --- a/frame/preimage/src/benchmarking.rs +++ b/frame/preimage/src/benchmarking.rs @@ -35,7 +35,7 @@ fn funded_account(name: &'static str, index: u32) -> T::AccountId { } fn preimage_and_hash() -> (Vec, T::Hash) { - sized_preimage_and_hash::(T::MaxSize::get()) + sized_preimage_and_hash::(MAX_SIZE) } fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { @@ -48,7 +48,7 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { benchmarks! { // Expensive note - will reserve. note_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -58,7 +58,7 @@ benchmarks! { } // Cheap note - will not reserve since it was requested. note_requested_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -69,7 +69,7 @@ benchmarks! { } // Cheap note - will not reserve since it's the manager. note_no_deposit_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let (preimage, hash) = sized_preimage_and_hash::(s); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) @@ -101,10 +101,12 @@ benchmarks! { let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::("noter", 0); whitelist_account!(noter); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter).into(), preimage)); + assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); }: _(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let deposit = T::BaseDeposit::get() + T::ByteDeposit::get() * MAX_SIZE.into(); + let s = RequestStatus::Requested { deposit: Some((noter, deposit)), count: 1, len: Some(MAX_SIZE) }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - would unreserve the deposit but none was held. request_no_deposit_preimage { @@ -112,14 +114,16 @@ benchmarks! { assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 2, len: Some(MAX_SIZE) }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is not yet noted, so deposit to unreserve. 
request_unnoted_preimage { let (_, hash) = preimage_and_hash::(); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is already requested, so just a counter bump. request_requested_preimage { @@ -127,7 +131,8 @@ benchmarks! { assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(2))); + let s = RequestStatus::Requested { deposit: None, count: 2, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. @@ -154,7 +159,8 @@ benchmarks! { assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 90f5ac175f540..e899d3643dbbf 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -30,6 +30,7 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] @@ -37,15 +38,18 @@ mod tests; pub mod weights; use sp_runtime::traits::{BadOrigin, Hash, Saturating}; -use sp_std::prelude::*; +use sp_std::{borrow::Cow, prelude::*}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::Pays, ensure, pallet_prelude::Get, - traits::{Currency, PreimageProvider, PreimageRecipient, ReservableCurrency}, - BoundedVec, + traits::{ + Currency, Defensive, FetchResult, Hash as PreimageHash, PreimageProvider, + PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, + }, + BoundedSlice, BoundedVec, }; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -59,20 +63,27 @@ pub use pallet::*; #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] pub enum RequestStatus { /// The associated preimage has not yet been requested by the system. The given deposit (if - /// some) is being held until either it becomes requested or the user retracts the primage. - Unrequested(Option<(AccountId, Balance)>), + /// some) is being held until either it becomes requested or the user retracts the preimage. + Unrequested { deposit: (AccountId, Balance), len: u32 }, /// There are a non-zero number of outstanding requests for this hash by this chain. If there - /// is a preimage registered, then it may be removed iff this counter becomes zero. - Requested(u32), + /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter + /// becomes zero. + Requested { deposit: Option<(AccountId, Balance)>, count: u32, len: Option }, } type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +/// Maximum size of preimage we can store is 4mb. +const MAX_SIZE: u32 = 4 * 1024 * 1024; + #[frame_support::pallet] pub mod pallet { use super::*; + /// The current storage version. 
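+	/// Chains still on the unversioned V0 layout are brought up to this version
+	/// by `migration::v1::Migration`.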
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -88,9 +99,6 @@ pub mod pallet { /// manage existing preimages. type ManagerOrigin: EnsureOrigin; - /// Max size allowed for a preimage. - type MaxSize: Get; - /// The base deposit for placing a preimage on chain. type BaseDeposit: Get>; @@ -100,6 +108,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData); #[pallet::event] @@ -116,7 +125,7 @@ pub mod pallet { #[pallet::error] pub enum Error { /// Preimage is too large to store on-chain. - TooLarge, + TooBig, /// Preimage has already been noted on-chain. AlreadyNoted, /// The user is not authorized to perform this action. @@ -134,10 +143,9 @@ pub mod pallet { pub(super) type StatusFor = StorageMap<_, Identity, T::Hash, RequestStatus>>; - /// The preimages stored by this pallet. #[pallet::storage] pub(super) type PreimageFor = - StorageMap<_, Identity, T::Hash, BoundedVec>; + StorageMap<_, Identity, (T::Hash, u32), BoundedVec>>; #[pallet::call] impl Pallet { @@ -150,9 +158,7 @@ pub mod pallet { // We accept a signed origin which will pay a deposit, or a root origin where a deposit // is not taken. let maybe_sender = Self::ensure_signed_or_manager(origin)?; - let bounded_vec = - BoundedVec::::try_from(bytes).map_err(|()| Error::::TooLarge)?; - let system_requested = Self::note_bytes(bounded_vec, maybe_sender.as_ref())?; + let (system_requested, _) = Self::note_bytes(bytes.into(), maybe_sender.as_ref())?; if system_requested || maybe_sender.is_none() { Ok(Pays::No.into()) } else { @@ -161,6 +167,11 @@ pub mod pallet { } /// Clear an unrequested preimage from the runtime storage. + /// + /// If `len` is provided, then it will be a much cheaper operation. + /// + /// - `hash`: The hash of the preimage to be removed from the store. + /// - `len`: The length of the preimage of `hash`. #[pallet::weight(T::WeightInfo::unnote_preimage())] pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { let maybe_sender = Self::ensure_signed_or_manager(origin)?; @@ -203,41 +214,46 @@ impl Pallet { /// Store some preimage on chain. /// + /// If `maybe_depositor` is `None` then it is also requested. If `Some`, then it is not. + /// /// We verify that the preimage is within the bounds of what the pallet supports. /// /// If the preimage was requested to be uploaded, then the user pays no deposits or tx fees. fn note_bytes( - preimage: BoundedVec, + preimage: Cow<[u8]>, maybe_depositor: Option<&T::AccountId>, - ) -> Result { + ) -> Result<(bool, T::Hash), DispatchError> { let hash = T::Hashing::hash(&preimage); - ensure!(!PreimageFor::::contains_key(hash), Error::::AlreadyNoted); + let len = preimage.len() as u32; + ensure!(len <= MAX_SIZE, Error::::TooBig); - // We take a deposit only if there is a provided depositor, and the preimage was not + // We take a deposit only if there is a provided depositor and the preimage was not // previously requested. This also allows the tx to pay no fee. - let was_requested = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested(..)), _) => true, - (Some(RequestStatus::Unrequested(..)), _) => + let status = match (StatusFor::::get(hash), maybe_depositor) { + (Some(RequestStatus::Requested { count, deposit, .. 
}), _) => + RequestStatus::Requested { count, deposit, len: Some(len) }, + (Some(RequestStatus::Unrequested { .. }), Some(_)) => return Err(Error::::AlreadyNoted.into()), - (None, None) => { - StatusFor::::insert(hash, RequestStatus::Unrequested(None)); - false - }, + (Some(RequestStatus::Unrequested { len, deposit }), None) => + RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, + (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, (None, Some(depositor)) => { let length = preimage.len() as u32; let deposit = T::BaseDeposit::get() .saturating_add(T::ByteDeposit::get().saturating_mul(length.into())); T::Currency::reserve(depositor, deposit)?; - let status = RequestStatus::Unrequested(Some((depositor.clone(), deposit))); - StatusFor::::insert(hash, status); - false + RequestStatus::Unrequested { deposit: (depositor.clone(), deposit), len } }, }; + let was_requested = matches!(status, RequestStatus::Requested { .. }); + StatusFor::::insert(hash, status); + + let _ = Self::insert(&hash, preimage) + .defensive_proof("Unable to insert. Logic error in `note_bytes`?"); - PreimageFor::::insert(hash, preimage); Self::deposit_event(Event::Noted { hash }); - Ok(was_requested) + Ok((was_requested, hash)) } // This function will add a hash to the list of requested preimages. @@ -245,19 +261,15 @@ impl Pallet { // If the preimage already exists before the request is made, the deposit for the preimage is // returned to the user, and removed from their management. fn do_request_preimage(hash: &T::Hash) { - let count = StatusFor::::get(hash).map_or(1, |x| match x { - RequestStatus::Requested(mut count) => { - count.saturating_inc(); - count - }, - RequestStatus::Unrequested(None) => 1, - RequestStatus::Unrequested(Some((owner, deposit))) => { - // Return the deposit - the preimage now has outstanding requests. - T::Currency::unreserve(&owner, deposit); - 1 - }, - }); - StatusFor::::insert(hash, RequestStatus::Requested(count)); + let (count, len, deposit) = + StatusFor::::get(hash).map_or((1, None, None), |x| match x { + RequestStatus::Requested { mut count, len, deposit } => { + count.saturating_inc(); + (count, len, deposit) + }, + RequestStatus::Unrequested { deposit, len } => (1, Some(len), Some(deposit)), + }); + StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); if count == 1 { Self::deposit_event(Event::Requested { hash: *hash }); } @@ -265,6 +277,8 @@ impl Pallet { // Clear a preimage from the storage of the chain, returning any deposit that may be reserved. // + // If `len` is provided, it will be a much cheaper operation. + // // If `maybe_owner` is provided, we verify that it is the correct owner before clearing the // data. fn do_unnote_preimage( @@ -272,51 +286,101 @@ impl Pallet { maybe_check_owner: Option, ) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotNoted)? { - RequestStatus::Unrequested(Some((owner, deposit))) => { + RequestStatus::Requested { deposit: Some((owner, deposit)), count, len } => { ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); T::Currency::unreserve(&owner, deposit); + StatusFor::::insert( + hash, + RequestStatus::Requested { deposit: None, count, len }, + ); + Ok(()) }, - RequestStatus::Unrequested(None) => { + RequestStatus::Requested { deposit: None, .. 
} => { ensure!(maybe_check_owner.is_none(), Error::::NotAuthorized); + Self::do_unrequest_preimage(hash) + }, + RequestStatus::Unrequested { deposit: (owner, deposit), len } => { + ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); + T::Currency::unreserve(&owner, deposit); + StatusFor::::remove(hash); + + Self::remove(hash, len); + Self::deposit_event(Event::Cleared { hash: *hash }); + Ok(()) }, - RequestStatus::Requested(_) => return Err(Error::::Requested.into()), } - StatusFor::::remove(hash); - PreimageFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); - Ok(()) } /// Clear a preimage request. fn do_unrequest_preimage(hash: &T::Hash) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotRequested)? { - RequestStatus::Requested(mut count) if count > 1 => { + RequestStatus::Requested { mut count, len, deposit } if count > 1 => { count.saturating_dec(); - StatusFor::::insert(hash, RequestStatus::Requested(count)); + StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); }, - RequestStatus::Requested(count) => { + RequestStatus::Requested { count, len, deposit } => { debug_assert!(count == 1, "preimage request counter at zero?"); - PreimageFor::::remove(hash); - StatusFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); + match (len, deposit) { + // Preimage was never noted. + (None, _) => StatusFor::::remove(hash), + // Preimage was noted without owner - just remove it. + (Some(len), None) => { + Self::remove(hash, len); + StatusFor::::remove(hash); + Self::deposit_event(Event::Cleared { hash: *hash }); + }, + // Preimage was noted with owner - move to unrequested so they can get refund. + (Some(len), Some(deposit)) => { + StatusFor::::insert(hash, RequestStatus::Unrequested { deposit, len }); + }, + } }, - RequestStatus::Unrequested(_) => return Err(Error::::NotRequested.into()), + RequestStatus::Unrequested { .. } => return Err(Error::::NotRequested.into()), } Ok(()) } + + fn insert(hash: &T::Hash, preimage: Cow<[u8]>) -> Result<(), ()> { + BoundedSlice::>::try_from(preimage.as_ref()) + .map(|s| PreimageFor::::insert((hash, s.len() as u32), s)) + } + + fn remove(hash: &T::Hash, len: u32) { + PreimageFor::::remove((hash, len)) + } + + fn have(hash: &T::Hash) -> bool { + Self::len(hash).is_some() + } + + fn len(hash: &T::Hash) -> Option { + use RequestStatus::*; + match StatusFor::::get(hash) { + Some(Requested { len: Some(len), .. }) | Some(Unrequested { len, .. }) => Some(len), + _ => None, + } + } + + fn fetch(hash: &T::Hash, len: Option) -> FetchResult { + let len = len.or_else(|| Self::len(hash)).ok_or(DispatchError::Unavailable)?; + PreimageFor::::get((hash, len)) + .map(|p| p.into_inner()) + .map(Into::into) + .ok_or(DispatchError::Unavailable) + } } impl PreimageProvider for Pallet { fn have_preimage(hash: &T::Hash) -> bool { - PreimageFor::::contains_key(hash) + Self::have(hash) } fn preimage_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested(..))) + matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. 
})) } fn get_preimage(hash: &T::Hash) -> Option> { - PreimageFor::::get(hash).map(|preimage| preimage.to_vec()) + Self::fetch(hash, None).ok().map(Cow::into_owned) } fn request_preimage(hash: &T::Hash) { @@ -330,15 +394,60 @@ impl PreimageProvider for Pallet { } impl PreimageRecipient for Pallet { - type MaxSize = T::MaxSize; + type MaxSize = ConstU32; // 2**22 fn note_preimage(bytes: BoundedVec) { // We don't really care if this fails, since that's only the case if someone else has // already noted it. - let _ = Self::note_bytes(bytes, None); + let _ = Self::note_bytes(bytes.into_inner().into(), None); } fn unnote_preimage(hash: &T::Hash) { + // Should never fail if authorization check is skipped. + let res = Self::do_unrequest_preimage(hash); + debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); + } +} + +impl> QueryPreimage for Pallet { + fn len(hash: &T::Hash) -> Option { + Pallet::::len(hash) + } + + fn fetch(hash: &T::Hash, len: Option) -> FetchResult { + Pallet::::fetch(hash, len) + } + + fn is_requested(hash: &T::Hash) -> bool { + matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. })) + } + + fn request(hash: &T::Hash) { + Self::do_request_preimage(hash) + } + + fn unrequest(hash: &T::Hash) { + let res = Self::do_unrequest_preimage(hash); + debug_assert!(res.is_ok(), "do_unrequest_preimage failed - counter underflow?"); + } +} + +impl> StorePreimage for Pallet { + const MAX_LENGTH: usize = MAX_SIZE as usize; + + fn note(bytes: Cow<[u8]>) -> Result { + // We don't really care if this fails, since that's only the case if someone else has + // already noted it. + let maybe_hash = Self::note_bytes(bytes, None).map(|(_, h)| h); + // Map to the correct trait error. + if maybe_hash == Err(DispatchError::from(Error::::TooBig)) { + Err(DispatchError::Exhausted) + } else { + maybe_hash + } + } + + fn unnote(hash: &T::Hash) { // Should never fail if authorization check is skipped. let res = Self::do_unnote_preimage(hash, None); debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs new file mode 100644 index 0000000000000..a5d15c23c758a --- /dev/null +++ b/frame/preimage/src/migration.rs @@ -0,0 +1,263 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the preimage pallet. + +use super::*; +use frame_support::{ + storage_alias, + traits::{ConstU32, OnRuntimeUpgrade}, +}; +use sp_std::collections::btree_map::BTreeMap; + +/// The log target. +const TARGET: &'static str = "runtime::preimage::migration::v1"; + +/// The original data layout of the preimage pallet without a specific version number. 
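+///
+/// In V0 the full image lives under its hash alone (`PreimageFor: hash -> bytes`,
+/// with the request state tracked separately in `StatusFor`); V1 keys the image
+/// by `(hash, len)` and folds the length into `RequestStatus` instead.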
+mod v0 { + use super::*; + + #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] + pub enum RequestStatus { + Unrequested(Option<(AccountId, Balance)>), + Requested(u32), + } + + #[storage_alias] + pub type PreimageFor = StorageMap< + Pallet, + Identity, + ::Hash, + BoundedVec>, + >; + + #[storage_alias] + pub type StatusFor = StorageMap< + Pallet, + Identity, + ::Hash, + RequestStatus<::AccountId, BalanceOf>, + >; + + /// Returns the number of images or `None` if the storage is corrupted. + #[cfg(feature = "try-runtime")] + pub fn image_count() -> Option { + let images = v0::PreimageFor::::iter_values().count() as u32; + let status = v0::StatusFor::::iter_values().count() as u32; + + if images == status { + Some(images) + } else { + None + } + } +} + +pub mod v1 { + use super::*; + + /// Migration for moving preimage from V0 to V1 storage. + /// + /// Note: This needs to be run with the same hashing algorithm as before + /// since it is not re-hashing the preimages. + pub struct Migration(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for Migration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + + let images = v0::image_count::().expect("v0 storage corrupted"); + log::info!(target: TARGET, "Migrating {} images", &images); + Ok((images as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + if StorageVersion::get::>() != 0 { + log::warn!( + target: TARGET, + "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ + Expected version 0" + ); + return weight + } + + let status = v0::StatusFor::::drain().collect::>(); + weight.saturating_accrue(T::DbWeight::get().reads(status.len() as u64)); + + let preimages = v0::PreimageFor::::drain().collect::>(); + weight.saturating_accrue(T::DbWeight::get().reads(preimages.len() as u64)); + + for (hash, status) in status.into_iter() { + let preimage = if let Some(preimage) = preimages.get(&hash) { + preimage + } else { + log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); + continue + }; + let len = preimage.len() as u32; + if len > MAX_SIZE { + log::error!( + target: TARGET, + "preimage too large for hash {:?}, len: {}", + &hash, + len + ); + continue + } + + let status = match status { + v0::RequestStatus::Unrequested(deposit) => match deposit { + Some(deposit) => RequestStatus::Unrequested { deposit, len }, + // `None` depositor becomes system-requested. 
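+						// (in V0 only the manager could note a preimage without a
+						// deposit, so it is kept alive as a single system request)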
+ None => + RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, + }, + v0::RequestStatus::Requested(count) if count == 0 => { + log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); + continue + }, + v0::RequestStatus::Requested(count) => + RequestStatus::Requested { deposit: None, count, len: Some(len) }, + }; + log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); + + crate::StatusFor::::insert(hash, status); + crate::PreimageFor::::insert(&(hash, len), preimage); + + weight.saturating_accrue(T::DbWeight::get().writes(2)); + } + StorageVersion::new(1).put::>(); + + weight.saturating_add(T::DbWeight::get().writes(1)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let old_images: u32 = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_images = image_count::().expect("V1 storage corrupted"); + + if new_images != old_images { + log::error!( + target: TARGET, + "migrated {} images, expected {}", + new_images, + old_images + ); + } + assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + Ok(()) + } + } + + /// Returns the number of images or `None` if the storage is corrupted. + #[cfg(feature = "try-runtime")] + pub fn image_count() -> Option { + // Use iter_values() to ensure that the values are decodable. + let images = crate::PreimageFor::::iter_values().count() as u32; + let status = crate::StatusFor::::iter_values().count() as u32; + + if images == status { + Some(images) + } else { + None + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::mock::{Test as T, *}; + + use frame_support::bounded_vec; + + #[test] + fn migration_works() { + new_test_ext().execute_with(|| { + assert_eq!(StorageVersion::get::>(), 0); + // Insert some preimages into the v0 storage: + + // Case 1: Unrequested without deposit + let (p, h) = preimage::(128); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(None)); + // Case 2: Unrequested with deposit + let (p, h) = preimage::(1024); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(Some((1, 1)))); + // Case 3: Requested by 0 (invalid) + let (p, h) = preimage::(8192); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Requested(0)); + // Case 4: Requested by 10 + let (p, h) = preimage::(65536); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Requested(10)); + + assert_eq!(v0::image_count::(), Some(4)); + assert_eq!(v1::image_count::(), None, "V1 storage should be corrupted"); + + let state = v1::Migration::::pre_upgrade().unwrap(); + let _w = v1::Migration::::on_runtime_upgrade(); + v1::Migration::::post_upgrade(state).unwrap(); + + // V0 and V1 share the same prefix, so `iter_values` still counts the same. + assert_eq!(v0::image_count::(), Some(3)); + assert_eq!(v1::image_count::(), Some(3)); // One gets skipped therefore 3. 
+ assert_eq!(StorageVersion::get::>(), 1); + + // Case 1: Unrequested without deposit becomes system-requested + let (p, h) = preimage::(128); + assert_eq!(crate::PreimageFor::::get(&(h, 128)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) + ); + // Case 2: Unrequested with deposit becomes unrequested + let (p, h) = preimage::(1024); + assert_eq!(crate::PreimageFor::::get(&(h, 1024)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) + ); + // Case 3: Requested by 0 should be skipped + let (_, h) = preimage::(8192); + assert_eq!(crate::PreimageFor::::get(&(h, 8192)), None); + assert_eq!(crate::StatusFor::::get(h), None); + // Case 4: Requested by 10 becomes requested by 10 + let (p, h) = preimage::(65536); + assert_eq!(crate::PreimageFor::::get(&(h, 65536)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) + ); + }); + } + + /// Returns a preimage with a given size and its hash. + fn preimage( + len: usize, + ) -> (BoundedVec>, ::Hash) { + let p = bounded_vec![1; len]; + let h = ::Hashing::hash_of(&p); + (p, h) + } +} diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index e12598a35b4bb..ce74ea65bd8aa 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -105,7 +105,6 @@ impl Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureSignedBy; - type MaxSize = ConstU32<1024>; type BaseDeposit = ConstU64<2>; type ByteDeposit = ConstU64<1>; } diff --git a/frame/preimage/src/tests.rs b/frame/preimage/src/tests.rs index e6b64ae16dd8c..f480b9c36b670 100644 --- a/frame/preimage/src/tests.rs +++ b/frame/preimage/src/tests.rs @@ -17,11 +17,35 @@ //! # Scheduler tests. +#![cfg(test)] + use super::*; use crate::mock::*; -use frame_support::{assert_noop, assert_ok}; +use frame_support::{ + assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_vec, + traits::{Bounded, BoundedInline, Hash as PreimageHash}, + StorageNoopGuard, +}; use pallet_balances::Error as BalancesError; +use sp_core::{blake2_256, H256}; + +/// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. 
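+///
+/// `Inline` carries the bytes themselves, `Lookup` carries a hash plus the
+/// known preimage length, and `Legacy` carries only a hash, leaving the length
+/// unknown until the preimage is fetched.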
+pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded>) { + let data: BoundedInline = bounded_vec![1]; + let inline = Bounded::>::Inline(data); + + let data = vec![1, 2]; + let hash: H256 = blake2_256(&data[..]).into(); + let len = data.len() as u32; + let lookup = Bounded::>::unrequested(hash, len); + + let data = vec![1, 2, 3]; + let hash: H256 = blake2_256(&data[..]).into(); + let legacy = Bounded::>::Legacy { hash, dummy: Default::default() }; + + (inline, lookup, legacy) +} #[test] fn user_note_preimage_works() { @@ -56,10 +80,7 @@ fn manager_note_preimage_works() { assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1]), - Error::::AlreadyNoted - ); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); }); } @@ -130,14 +151,16 @@ fn requested_then_noted_preimage_cannot_be_unnoted() { new_test_ext().execute_with(|| { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), - Error::::Requested - ); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); + // it's still here. let h = hashed([1]); assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); + + // now it's gone + assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert!(!Preimage::have_preimage(&hashed([1]))); }); } @@ -145,15 +168,16 @@ fn requested_then_noted_preimage_cannot_be_unnoted() { fn request_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -189,6 +213,7 @@ fn request_user_note_order_makes_no_difference() { new_test_ext().execute_with(|| { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -226,8 +251,240 @@ fn user_noted_then_requested_preimage_is_refunded_once_only() { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); // Still have reserve from `vec[1; 3]`. 
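		// (in this mock: BaseDeposit of 2 + 3 bytes * ByteDeposit of 1 = 5)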
assert_eq!(Balances::reserved_balance(2), 5); assert_eq!(Balances::free_balance(2), 95); }); } + +#[test] +fn noted_preimage_use_correct_map() { + new_test_ext().execute_with(|| { + // Add one preimage per bucket... + for i in 0..7 { + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; 128 << (i * 2)])); + } + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; MAX_SIZE as usize])); + assert_eq!(PreimageFor::::iter().count(), 8); + + // All are present + assert_eq!(StatusFor::::iter().count(), 8); + + // Now start removing them again... + for i in 0..7 { + assert_ok!(Preimage::unnote_preimage( + RuntimeOrigin::signed(1), + hashed(vec![0; 128 << (i * 2)]) + )); + } + assert_eq!(PreimageFor::::iter().count(), 1); + assert_ok!(Preimage::unnote_preimage( + RuntimeOrigin::signed(1), + hashed(vec![0; MAX_SIZE as usize]) + )); + assert_eq!(PreimageFor::::iter().count(), 0); + + // All are gone + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `StorePreimage` and `QueryPreimage` traits work together. +#[test] +fn query_and_store_preimage_workflow() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 512]; + let encoded = data.encode(); + + // Bound an unbound value. + let bound = Preimage::bound(data.clone()).unwrap(); + let (len, hash) = (bound.len().unwrap(), bound.hash()); + + assert_eq!(hash, blake2_256(&encoded).into()); + assert_eq!(bound.len(), Some(len)); + assert!(bound.lookup_needed(), "Should not be Inlined"); + assert_eq!(bound.lookup_len(), Some(len)); + + // The value is requested and available. + assert!(Preimage::is_requested(&hash)); + assert!(::have(&bound)); + assert_eq!(Preimage::len(&hash), Some(len)); + + // It can be fetched with length. + assert_eq!(Preimage::fetch(&hash, Some(len)).unwrap(), encoded); + // ... and without length. + assert_eq!(Preimage::fetch(&hash, None).unwrap(), encoded); + // ... but not with wrong length. + assert_err!(Preimage::fetch(&hash, Some(0)), DispatchError::Unavailable); + + // It can be peeked and decoded correctly. + assert_eq!(Preimage::peek::>(&bound).unwrap(), (data.clone(), Some(len))); + // Request it two more times. + assert_eq!(Preimage::pick::>(hash, len), bound); + Preimage::request(&hash); + // It is requested thrice. + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 3, .. } + )); + + // It can be realized and decoded correctly. + assert_eq!(Preimage::realize::>(&bound).unwrap(), (data.clone(), Some(len))); + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 2, .. } + )); + // Dropping should unrequest. + Preimage::drop(&bound); + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 1, .. } + )); + + // Is still available. + assert!(::have(&bound)); + // Manually unnote it. + Preimage::unnote(&hash); + // Is not available anymore. + assert!(!::have(&bound)); + assert_err!(Preimage::fetch(&hash, Some(len)), DispatchError::Unavailable); + // And not requested since the traits assume permissioned origin. + assert!(!Preimage::is_requested(&hash)); + + // No storage changes remain. Checked by `StorageNoopGuard`. + }); +} + +/// The request function behaves as expected. +#[test] +fn query_preimage_request_works() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 10]; + let hash: PreimageHash = blake2_256(&data[..]).into(); + + // Request the preimage. 
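+		// (nothing is noted for `hash` yet, so this only creates a status entry
+		// with an unknown length)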
+ ::request(&hash); + + // The preimage is requested with unknown length and cannot be fetched. + assert!(::is_requested(&hash)); + assert!(::len(&hash).is_none()); + assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); + + // Request again. + ::request(&hash); + // The preimage is still requested. + assert!(::is_requested(&hash)); + assert!(::len(&hash).is_none()); + assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); + // But there is only one entry in the map. + assert_eq!(StatusFor::::iter().count(), 1); + + // Un-request the preimage. + ::unrequest(&hash); + // It is still requested. + assert!(::is_requested(&hash)); + // Un-request twice. + ::unrequest(&hash); + // It is not requested anymore. + assert!(!::is_requested(&hash)); + // And there is no entry in the map. + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `QueryPreimage` functions can be used together with `Bounded` values. +#[test] +fn query_preimage_hold_and_drop_work() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let (inline, lookup, legacy) = make_bounded_values(); + + // `hold` does nothing for `Inline` values. + assert_storage_noop!(::hold(&inline)); + // `hold` requests `Lookup` values. + ::hold(&lookup); + assert!(::is_requested(&lookup.hash())); + // `hold` requests `Legacy` values. + ::hold(&legacy); + assert!(::is_requested(&legacy.hash())); + + // There are two values requested in total. + assert_eq!(StatusFor::::iter().count(), 2); + + // Cleanup by dropping both. + ::drop(&lookup); + assert!(!::is_requested(&lookup.hash())); + ::drop(&legacy); + assert!(!::is_requested(&legacy.hash())); + + // There are no values requested anymore. + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `StorePreimage` trait works as expected. +#[test] +fn store_preimage_basic_works() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 512]; // Too large to inline. + let encoded = Cow::from(data.encode()); + + // Bound the data. + let bound = ::bound(data.clone()).unwrap(); + // The preimage can be peeked. + assert_ok!(::peek(&bound)); + // Un-note the preimage. + ::unnote(&bound.hash()); + // The preimage cannot be peeked anymore. + assert_err!(::peek(&bound), DispatchError::Unavailable); + // Noting the wrong pre-image does not make it peek-able. + assert_ok!(::note(Cow::Borrowed(&data))); + assert_err!(::peek(&bound), DispatchError::Unavailable); + + // Manually note the preimage makes it peek-able again. + assert_ok!(::note(encoded.clone())); + // Noting again works. + assert_ok!(::note(encoded)); + assert_ok!(::peek(&bound)); + + // Cleanup. + ::unnote(&bound.hash()); + let data_hash = blake2_256(&data); + ::unnote(&data_hash.into()); + + // No storage changes remain. Checked by `StorageNoopGuard`. + }); +} + +#[test] +fn store_preimage_note_too_large_errors() { + new_test_ext().execute_with(|| { + // Works with `MAX_LENGTH`. + let len = ::MAX_LENGTH; + let data = vec![0u8; len]; + assert_ok!(::note(data.into())); + + // Errors with `MAX_LENGTH+1`. + let data = vec![0u8; len + 1]; + assert_err!(::note(data.into()), DispatchError::Exhausted); + }); +} + +#[test] +fn store_preimage_bound_too_large_errors() { + new_test_ext().execute_with(|| { + // Using `MAX_LENGTH` number of bytes in a vector does not work + // since SCALE prepends the length. 
+ let len = ::MAX_LENGTH; + let data: Vec = vec![0; len]; + assert_err!(::bound(data.clone()), DispatchError::Exhausted); + + // Works with `MAX_LENGTH-4`. + let data: Vec = vec![0; len - 4]; + assert_ok!(::bound(data.clone())); + }); +} diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index ad9e3e569e733..186c41b798c6b 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_preimage +// --chain=dev // --output=./frame/preimage/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,88 +63,90 @@ pub trait WeightInfo { /// Weights for pallet_preimage using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(32_591_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(23_350_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. 
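The `store_preimage_bound_too_large_errors` test above hinges on SCALE's compact length prefix: vectors in this size range pay a 4-byte prefix, which is exactly why `MAX_LENGTH - 4` bounds successfully while `MAX_LENGTH` itself does not. A self-contained check of the prefix sizes, assuming the `parity-scale-codec` crate:

```rust
use codec::Encode; // parity-scale-codec

fn main() {
    // Compact length prefixes: 1 byte up to 63 elements, 2 bytes up to
    // 2^14 - 1, and 4 bytes up to 2^30 - 1.
    assert_eq!(vec![0u8; 63].encode().len(), 63 + 1);
    assert_eq!(vec![0u8; 16_383].encode().len(), 16_383 + 2);
    assert_eq!(vec![0u8; 1 << 20].encode().len(), (1 << 20) + 4);
}
```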
fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(21_436_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as u64) + Weight::from_ref_time(44_567_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as u64) + Weight::from_ref_time(30_065_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as u64) + Weight::from_ref_time(28_470_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as u64) + Weight::from_ref_time(14_601_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as u64) + Weight::from_ref_time(20_121_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(7_745_000 as u64) + Weight::from_ref_time(9_440_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as u64) + Weight::from_ref_time(29_013_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as u64) + Weight::from_ref_time(9_223_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as u64) + Weight::from_ref_time(9_252_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -150,88 +154,90 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. 
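All of the regenerated entries above share one shape: a benchmarked base, a per-byte slope on the `s` component, and flat database-access costs. Recomputing `note_preimage` in plain `u64` ref-time makes that explicit; the two DB constants stand in for `T::DbWeight::get()` and are assumptions, not the runtime's actual values:

```rust
const DB_READ: u64 = 25_000_000; // assumed per-read ref-time
const DB_WRITE: u64 = 100_000_000; // assumed per-write ref-time

/// Mirrors `note_preimage` above: base + slope * s + 1 read + 2 writes.
fn note_preimage_ref_time(s: u64) -> u64 {
    32_591_000u64
        .saturating_add(1_680u64.saturating_mul(s))
        .saturating_add(DB_READ)
        .saturating_add(2 * DB_WRITE)
}
```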
fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(32_591_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(23_350_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(21_436_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as u64) + Weight::from_ref_time(44_567_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as u64) + Weight::from_ref_time(30_065_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as u64) + Weight::from_ref_time(28_470_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as u64) + Weight::from_ref_time(14_601_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as u64) + Weight::from_ref_time(20_121_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - 
Weight::from_ref_time(7_745_000 as u64) + Weight::from_ref_time(9_440_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as u64) + Weight::from_ref_time(29_013_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as u64) + Weight::from_ref_time(9_223_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as u64) + Weight::from_ref_time(9_252_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 0b6b7b0f51d9a..18d3d48dc024c 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -452,7 +452,7 @@ pub mod pallet { ensure!(!friends.is_empty(), Error::::NotEnoughFriends); ensure!(threshold as usize <= friends.len(), Error::::NotEnoughFriends); let bounded_friends: FriendsOf = - friends.try_into().map_err(|()| Error::::MaxFriends)?; + friends.try_into().map_err(|_| Error::::MaxFriends)?; ensure!(Self::is_sorted_and_unique(&bounded_friends), Error::::NotSorted); // Total deposit is base fee + number of friends * factor fee let friend_deposit = T::FriendDepositFactor::get() @@ -554,7 +554,7 @@ pub mod pallet { Err(pos) => active_recovery .friends .try_insert(pos, who.clone()) - .map_err(|()| Error::::MaxFriends)?, + .map_err(|_| Error::::MaxFriends)?, } // Update storage with the latest details >::insert(&lost, &rescuer, active_recovery); diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 45ec894e2c2c0..bc6fb31bf1127 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -24,10 +24,10 @@ use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account} use frame_support::{ assert_ok, dispatch::UnfilteredDispatchable, - traits::{Currency, EnsureOrigin}, + traits::{Bounded, Currency, EnsureOrigin}, }; use frame_system::RawOrigin; -use sp_runtime::traits::{Bounded, Hash}; +use sp_runtime::traits::Bounded as ArithBounded; const SEED: u32 = 0; @@ -42,6 +42,12 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } +fn dummy_call, I: 'static>() -> Bounded<>::RuntimeCall> { + let inner = frame_system::Call::remark { remark: vec![] }; + let call = >::RuntimeCall::from(inner); + T::Preimages::bound(call).unwrap() +} + fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, ReferendumIndex) { let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { @@ -50,9 +56,9 @@ fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, Referendu } let proposal_origin = Box::new(RawOrigin::Root.into()); - let proposal_hash = T::Hashing::hash_of(&0); + let proposal = dummy_call::(); let enactment_moment = DispatchTime::After(0u32.into()); - let call = Call::::submit 
{ proposal_origin, proposal_hash, enactment_moment }; + let call = crate::Call::::submit { proposal_origin, proposal, enactment_moment }; assert_ok!(call.dispatch_bypass_filter(origin.clone())); let index = ReferendumCount::::get() - 1; (origin, index) @@ -196,7 +202,7 @@ benchmarks_instance_pallet! { }: _( origin, Box::new(RawOrigin::Root.into()), - T::Hashing::hash_of(&0), + dummy_call::(), DispatchTime::After(0u32.into()) ) verify { let index = ReferendumCount::::get().checked_sub(1).unwrap(); diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 739f0dbc30ed4..1bdc19d49c414 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -69,11 +69,11 @@ use frame_support::{ ensure, traits::{ schedule::{ - v2::{Anon as ScheduleAnon, Named as ScheduleNamed}, - DispatchTime, MaybeHashed, + v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, + DispatchTime, }, - Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, - ReservableCurrency, VoteTally, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, + ReservableCurrency, StorePreimage, VoteTally, }, BoundedVec, }; @@ -92,10 +92,10 @@ use self::branch::{BeginDecidingBranch, OneFewerDecidingBranch, ServiceBranch}; pub use self::{ pallet::*, types::{ - BalanceOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, InsertSorted, - NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, ReferendumInfoOf, - ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, TrackIdOf, TrackInfo, - TrackInfoOf, TracksInfo, VotesOf, + BalanceOf, BoundedCallOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, + InsertSorted, NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, + ReferendumInfoOf, ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, + TrackIdOf, TrackInfo, TrackInfoOf, TracksInfo, VotesOf, }, weights::WeightInfo, }; @@ -149,23 +149,16 @@ pub mod pallet { // System level stuff. type RuntimeCall: Parameter + Dispatchable - + From>; + + From> + + IsType<::RuntimeCall> + + From>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. - type Scheduler: ScheduleAnon< - Self::BlockNumber, - CallOf, - PalletsOriginOf, - Hash = Self::Hash, - > + ScheduleNamed< - Self::BlockNumber, - CallOf, - PalletsOriginOf, - Hash = Self::Hash, - >; + type Scheduler: ScheduleAnon, PalletsOriginOf> + + ScheduleNamed, PalletsOriginOf>; /// Currency type for this pallet. type Currency: ReservableCurrency; // Origins and unbalances. @@ -221,6 +214,9 @@ pub mod pallet { Self::BlockNumber, RuntimeOrigin = ::PalletsOrigin, >; + + /// The preimage provider. + type Preimages: QueryPreimage + StorePreimage; } /// The next free referendum index, aka the number of referenda started so far. @@ -259,8 +255,8 @@ pub mod pallet { index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The hash of the proposal up for referendum. - proposal_hash: T::Hash, + /// The proposal for the referendum. + proposal: BoundedCallOf, }, /// The decision deposit has been placed. DecisionDepositPlaced { @@ -293,8 +289,8 @@ pub mod pallet { index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The hash of the proposal up for referendum. - proposal_hash: T::Hash, + /// The proposal for the referendum. 
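A few hunks up, the recovery pallet swaps `map_err(|()| ...)` for `map_err(|_| ...)`: converting a `Vec` into a `BoundedVec` no longer fails with the unit type. A minimal sketch of the conversion, assuming the newer `frame_support` API in which over-long input is simply rejected:

```rust
use frame_support::{traits::ConstU32, BoundedVec};

fn main() {
    let too_long: Vec<u8> = vec![1, 2, 3];
    // The error type here is no longer `()`, so `|()|` patterns stop
    // compiling and a catch-all `|_|` is used instead.
    let bounded: Result<BoundedVec<u8, ConstU32<2>>, _> = too_long.try_into();
    assert!(bounded.is_err());
}
```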
+ proposal: BoundedCallOf, /// The current tally of votes in this referendum. tally: T::Tally, }, @@ -381,7 +377,7 @@ pub mod pallet { /// - `origin`: must be `SubmitOrigin` and the account must have `SubmissionDeposit` funds /// available. /// - `proposal_origin`: The origin from which the proposal should be executed. - /// - `proposal_hash`: The hash of the proposal preimage. + /// - `proposal`: The proposal. /// - `enactment_moment`: The moment that the proposal should be enacted. /// /// Emits `Submitted`. @@ -389,7 +385,7 @@ pub mod pallet { pub fn submit( origin: OriginFor, proposal_origin: Box>, - proposal_hash: T::Hash, + proposal: BoundedCallOf, enactment_moment: DispatchTime, ) -> DispatchResult { let who = T::SubmitOrigin::ensure_origin(origin)?; @@ -403,11 +399,12 @@ pub mod pallet { r }); let now = frame_system::Pallet::::block_number(); - let nudge_call = Call::nudge_referendum { index }; + let nudge_call = + T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index }))?; let status = ReferendumStatus { track, origin: *proposal_origin, - proposal_hash, + proposal: proposal.clone(), enactment: enactment_moment, submitted: now, submission_deposit, @@ -419,7 +416,7 @@ pub mod pallet { }; ReferendumInfoFor::::insert(index, ReferendumInfo::Ongoing(status)); - Self::deposit_event(Event::::Submitted { index, track, proposal_hash }); + Self::deposit_event(Event::::Submitted { index, track, proposal }); Ok(()) } @@ -651,7 +648,8 @@ impl, I: 'static> Polling for Pallet { let mut status = ReferendumStatusOf:: { track: class, origin: frame_support::dispatch::RawOrigin::Root.into(), - proposal_hash: ::hash_of(&index), + proposal: T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) + .map_err(|_| ())?, enactment: DispatchTime::After(Zero::zero()), submitted: now, submission_deposit: Deposit { who: dummy_account_id, amount: Zero::zero() }, @@ -709,18 +707,18 @@ impl, I: 'static> Pallet { track: &TrackInfoOf, desired: DispatchTime, origin: PalletsOriginOf, - call_hash: T::Hash, + call: BoundedCallOf, ) { let now = frame_system::Pallet::::block_number(); let earliest_allowed = now.saturating_add(track.min_enactment_period); let desired = desired.evaluate(now); let ok = T::Scheduler::schedule_named( - (ASSEMBLY_ID, "enactment", index).encode(), + (ASSEMBLY_ID, "enactment", index).using_encoded(sp_io::hashing::blake2_256), DispatchTime::At(desired.max(earliest_allowed)), None, 63, origin, - MaybeHashed::Hash(call_hash), + call, ) .is_ok(); debug_assert!(ok, "LOGIC ERROR: bake_referendum/schedule_named failed"); @@ -728,7 +726,7 @@ impl, I: 'static> Pallet { /// Set an alarm to dispatch `call` at block number `when`. 
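The `schedule_named` id in the enactment hunk above changes from the raw tuple encoding to a fixed 32-byte hash of it, matching the scheduler's new `TaskName`. A sketch of that derivation; treating `ASSEMBLY_ID` as the 8-byte identifier `*b"assembly"` is an assumption here:

```rust
use codec::Encode; // parity-scale-codec
use sp_io::hashing::blake2_256;

/// Derive the fixed-size enactment task name for a referendum `index`.
fn enactment_task_name(index: u32) -> [u8; 32] {
    (*b"assembly", "enactment", index).using_encoded(blake2_256)
}
```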
fn set_alarm( - call: impl Into>, + call: BoundedCallOf, when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); @@ -739,7 +737,7 @@ impl, I: 'static> Pallet { None, 128u8, frame_system::RawOrigin::Root.into(), - MaybeHashed::Value(call.into()), + call, ) .ok() .map(|x| (when, x)); @@ -776,7 +774,7 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::::DecisionStarted { index, tally: status.tally.clone(), - proposal_hash: status.proposal_hash, + proposal: status.proposal.clone(), track: status.track, }); let confirming = if is_passing { @@ -843,12 +841,21 @@ impl, I: 'static> Pallet { let alarm_interval = T::AlarmInterval::get().max(One::one()); let when = (next_block + alarm_interval - One::one()) / alarm_interval * alarm_interval; + let call = match T::Preimages::bound(CallOf::::from(Call::one_fewer_deciding { + track, + })) { + Ok(c) => c, + Err(_) => { + debug_assert!(false, "Unable to create a bounded call from `one_fewer_deciding`??",); + return + }, + }; let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, 128u8, frame_system::RawOrigin::Root.into(), - MaybeHashed::Value(Call::one_fewer_deciding { track }.into()), + call, ); debug_assert!( maybe_result.is_ok(), @@ -871,7 +878,18 @@ impl, I: 'static> Pallet { if status.alarm.as_ref().map_or(true, |&(when, _)| when != alarm) { // Either no alarm or one that was different Self::ensure_no_alarm(status); - status.alarm = Self::set_alarm(Call::nudge_referendum { index }, alarm); + let call = + match T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) { + Ok(c) => c, + Err(_) => { + debug_assert!( + false, + "Unable to create a bounded call from `nudge_referendum`??", + ); + return false + }, + }; + status.alarm = Self::set_alarm(call, alarm); true } else { false @@ -987,14 +1005,8 @@ impl, I: 'static> Pallet { // Passed! 
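`set_alarm` above rounds the wake-up block up to the next multiple of the alarm interval using the usual integer trick; in isolation:

```rust
/// Round `next_block` up to the next multiple of `alarm_interval`,
/// clamping the interval to at least 1 as `set_alarm` does.
fn next_alarm(next_block: u64, alarm_interval: u64) -> u64 {
    let interval = alarm_interval.max(1);
    (next_block + interval - 1) / interval * interval
}

fn main() {
    assert_eq!(next_alarm(7, 4), 8);
    assert_eq!(next_alarm(8, 4), 8);
    assert_eq!(next_alarm(9, 1), 9);
}
```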
Self::ensure_no_alarm(&mut status); Self::note_one_fewer_deciding(status.track); - let (desired, call_hash) = (status.enactment, status.proposal_hash); - Self::schedule_enactment( - index, - track, - desired, - status.origin, - call_hash, - ); + let (desired, call) = (status.enactment, status.proposal); + Self::schedule_enactment(index, track, desired, status.origin, call); Self::deposit_event(Event::::Confirmed { index, tally: status.tally, diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 920e529bf05ca..c98fbf9a676b1 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -24,7 +24,7 @@ use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, - PreimageRecipient, SortedMembers, + SortedMembers, }, weights::Weight, }; @@ -32,7 +32,7 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, Hash, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup}, DispatchResult, Perbill, }; @@ -97,7 +97,6 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<4096>; type BaseDeposit = (); type ByteDeposit = (); } @@ -111,8 +110,7 @@ impl pallet_scheduler::Config for Test { type MaxScheduledPerBlock = ConstU32<100>; type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = ConstU64<10>; + type Preimages = Preimage; } impl pallet_balances::Config for Test { type MaxReserves = (); @@ -229,6 +227,7 @@ impl Config for Test { type UndecidingTimeout = ConstU64<20>; type AlarmInterval = AlarmInterval; type Tracks = TestTracksInfo; + type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -306,14 +305,13 @@ pub fn set_balance_proposal(value: u64) -> Vec { .encode() } -pub fn set_balance_proposal_hash(value: u64) -> H256 { +pub fn set_balance_proposal_bounded(value: u64) -> BoundedCallOf { let c = RuntimeCall::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0, }); - >::note_preimage(c.encode().try_into().unwrap()); - BlakeTwo256::hash_of(&c) + ::bound(c).unwrap() } #[allow(dead_code)] @@ -321,7 +319,7 @@ pub fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { Referenda::submit( RuntimeOrigin::signed(who), Box::new(frame_system::RawOrigin::Root.into()), - set_balance_proposal_hash(value), + set_balance_proposal_bounded(value), DispatchTime::After(delay), ) } @@ -449,7 +447,7 @@ impl RefState { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(frame_support::dispatch::RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index 778d00e516693..355ce3021b87f 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -44,7 +44,7 @@ fn basic_happy_path_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_eq!(Balances::reserved_balance(&1), 2); @@ -175,7 +175,7 @@ fn queueing_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(5), 
Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(0), + set_balance_proposal_bounded(0), DispatchTime::After(0), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(5), 0)); @@ -187,7 +187,7 @@ fn queueing_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(i), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(i), + set_balance_proposal_bounded(i), DispatchTime::After(0), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(i), i as u32)); @@ -272,7 +272,7 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(20), )); run_to(20); @@ -292,13 +292,13 @@ fn tracks_are_distinguished() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_ok!(Referenda::submit( RuntimeOrigin::signed(2), Box::new(RawOrigin::None.into()), - set_balance_proposal_hash(2), + set_balance_proposal_bounded(2), DispatchTime::At(20), )); @@ -315,7 +315,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 0, origin: OriginCaller::system(RawOrigin::Root), - proposal_hash: set_balance_proposal_hash(1), + proposal: set_balance_proposal_bounded(1), enactment: DispatchTime::At(10), submitted: 1, submission_deposit: Deposit { who: 1, amount: 2 }, @@ -331,7 +331,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 1, origin: OriginCaller::system(RawOrigin::None), - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal_bounded(2), enactment: DispatchTime::At(20), submitted: 1, submission_deposit: Deposit { who: 2, amount: 2 }, @@ -350,13 +350,13 @@ fn tracks_are_distinguished() { #[test] fn submit_errors_work() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); // No track for Signed origins. 
assert_noop!( Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Signed(2).into()), - h, + h.clone(), DispatchTime::At(10), ), Error::::NoTrack @@ -381,7 +381,7 @@ fn decision_deposit_errors_work() { let e = Error::::NotOngoing; assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -403,7 +403,7 @@ fn refund_deposit_works() { let e = Error::::BadReferendum; assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(1), 0), e); - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -425,7 +425,7 @@ fn refund_deposit_works() { #[test] fn cancel_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -444,7 +444,7 @@ fn cancel_works() { #[test] fn cancel_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -462,7 +462,7 @@ fn cancel_errors_works() { #[test] fn kill_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -482,7 +482,7 @@ fn kill_works() { #[test] fn kill_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index a6311e5f925be..2ce93cb6adc3c 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -19,7 +19,10 @@ use super::*; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::{traits::schedule::Anon, Parameter}; +use frame_support::{ + traits::{schedule::v3::Anon, Bounded}, + Parameter, +}; use scale_info::TypeInfo; use sp_arithmetic::{Rounding::*, SignedRounding::*}; use sp_runtime::{FixedI64, PerThing, RuntimeDebug}; @@ -31,6 +34,7 @@ pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; pub type CallOf = >::RuntimeCall; +pub type BoundedCallOf = Bounded<>::RuntimeCall>; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; pub type PalletsOriginOf = @@ -39,7 +43,7 @@ pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, ::BlockNumber, - ::Hash, + BoundedCallOf, BalanceOf, TallyOf, ::AccountId, @@ -49,7 +53,7 @@ pub type ReferendumStatusOf = ReferendumStatus< TrackIdOf, PalletsOriginOf, ::BlockNumber, - ::Hash, + BoundedCallOf, BalanceOf, TallyOf, ::AccountId, @@ -160,7 +164,7 @@ pub struct ReferendumStatus< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + 
PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -171,7 +175,7 @@ pub struct ReferendumStatus< /// The origin for this referendum. pub(crate) origin: RuntimeOrigin, /// The hash of the proposal up for referendum. - pub(crate) proposal_hash: Hash, + pub(crate) proposal: Call, /// The time the proposal should be scheduled for enactment. pub(crate) enactment: DispatchTime, /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if it @@ -197,7 +201,7 @@ pub enum ReferendumInfo< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -209,7 +213,7 @@ pub enum ReferendumInfo< TrackId, RuntimeOrigin, Moment, - Hash, + Call, Balance, Tally, AccountId, @@ -232,12 +236,12 @@ impl< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 9397c66170425..aaa30fd88ffda 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -18,201 +18,219 @@ //! Scheduler pallet benchmarking. use super::*; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{account, benchmarks}; use frame_support::{ ensure, - traits::{OnInitialize, PreimageProvider, PreimageRecipient}, + traits::{schedule::Priority, BoundedInline}, }; -use sp_runtime::traits::Hash; +use frame_system::RawOrigin; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; -use frame_system::Pallet as System; +use frame_system::Call as SystemCall; + +const SEED: u32 = 0; const BLOCK_NUMBER: u32 = 2; type SystemOrigin = ::RuntimeOrigin; -/// Add `n` named items to the schedule. +/// Add `n` items to the schedule. 
/// /// For `resolved`: +/// - ` /// - `None`: aborted (hash without preimage) /// - `Some(true)`: hash resolves into call if possible, plain call otherwise /// - `Some(false)`: plain call -fn fill_schedule( - when: T::BlockNumber, - n: u32, +fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { + let t = DispatchTime::At(when); + let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); + for i in 0..n { + let call = make_call::(None); + let period = Some(((i + 100).into(), 100)); + let name = u32_to_name(i); + Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + } + ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); + Ok(()) +} + +fn u32_to_name(i: u32) -> TaskName { + i.using_encoded(blake2_256) +} + +fn make_task( periodic: bool, named: bool, - resolved: Option, -) -> Result<(), &'static str> { - for i in 0..n { - // Named schedule is strictly heavier than anonymous - let (call, hash) = call_and_hash::(i); - let call_or_hash = match resolved { - Some(true) => { - T::PreimageProvider::note_preimage(call.encode().try_into().unwrap()); - if T::PreimageProvider::have_preimage(&hash) { - CallOrHashOf::::Hash(hash) - } else { - call.into() - } + signed: bool, + maybe_lookup_len: Option, + priority: Priority, +) -> ScheduledOf { + let call = make_call::(maybe_lookup_len); + let maybe_periodic = match periodic { + true => Some((100u32.into(), 100)), + false => None, + }; + let maybe_id = match named { + true => Some(u32_to_name(0)), + false => None, + }; + let origin = make_origin::(signed); + Scheduled { maybe_id, priority, call, maybe_periodic, origin, _phantom: PhantomData } +} + +fn bounded(len: u32) -> Option::RuntimeCall>> { + let call = + <::RuntimeCall>::from(SystemCall::remark { remark: vec![0; len as usize] }); + T::Preimages::bound(call).ok() +} + +fn make_call(maybe_lookup_len: Option) -> Bounded<::RuntimeCall> { + let bound = BoundedInline::bound() as u32; + let mut len = match maybe_lookup_len { + Some(len) => len.min(T::Preimages::MAX_LENGTH as u32 - 2).max(bound) - 3, + None => bound.saturating_sub(4), + }; + + loop { + let c = match bounded::(len) { + Some(x) => x, + None => { + len -= 1; + continue }, - Some(false) => call.into(), - None => CallOrHashOf::::Hash(hash), }; - let period = match periodic { - true => Some(((i + 100).into(), 100)), - false => None, - }; - let t = DispatchTime::At(when); - let origin = frame_system::RawOrigin::Root.into(); - if named { - Scheduler::::do_schedule_named(i.encode(), t, period, 0, origin, call_or_hash)?; + if c.lookup_needed() == maybe_lookup_len.is_some() { + break c + } + if maybe_lookup_len.is_some() { + len += 1; } else { - Scheduler::::do_schedule(t, period, 0, origin, call_or_hash)?; + if len > 0 { + len -= 1; + } else { + break c + } } } - ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); - Ok(()) } -fn call_and_hash(i: u32) -> (::RuntimeCall, T::Hash) { - // Essentially a no-op call. - let call: ::RuntimeCall = frame_system::Call::remark { remark: i.encode() }.into(); - let hash = T::Hashing::hash_of(&call); - (call, hash) +fn make_origin(signed: bool) -> ::PalletsOrigin { + match signed { + true => frame_system::RawOrigin::Signed(account("origin", 0, SEED)).into(), + false => frame_system::RawOrigin::Root.into(), + } +} + +fn dummy_counter() -> WeightCounter { + WeightCounter { used: Weight::zero(), limit: Weight::MAX } } benchmarks! { - on_initialize_periodic_named_resolved { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `service_agendas` when no work is done. + service_agendas_base { + let now = T::BlockNumber::from(BLOCK_NUMBER); + IncompleteSince::::put(now - One::one()); + }: { + Scheduler::::service_agendas(&mut dummy_counter(), now, 0); + } verify { + assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } - on_initialize_named_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - assert!(Agenda::::iter().count() == 0); + // `service_agenda` when no work is done. + service_agenda_base { + let now = BLOCK_NUMBER.into(); + let s in 0 .. T::MaxScheduledPerBlock::get(); + fill_schedule::(now, s)?; + let mut executed = 0; + }: { + Scheduler::::service_agenda(&mut dummy_counter(), &mut executed, now, now, 0); + } verify { + assert_eq!(executed, 0); } - on_initialize_periodic_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, false, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_base { + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, false, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { + //assert_eq!(result, Ok(())); } - on_initialize_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - assert!(Agenda::::iter().count() == 0); + // `service_task` when the task is a non-periodic, non-named, fetched call (with a known + // preimage length) and which is not dispatched (e.g. due to being overweight). + service_task_fetched { + let s in (BoundedInline::bound() as u32) .. (T::Preimages::MAX_LENGTH as u32); + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, false, false, Some(s), 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_named_aborted { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, None)?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), 0); - if let Some(delay) = T::NoPreimagePostponement::get() { - assert_eq!(Agenda::::get(when + delay).len(), s as usize); - } else { - assert!(Agenda::::iter().count() == 0); - } + // `service_task` when the task is a non-periodic, named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_named { + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, true, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_aborted { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, None)?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), 0); - if let Some(delay) = T::NoPreimagePostponement::get() { - assert_eq!(Agenda::::get(when + delay).len(), s as usize); - } else { - assert!(Agenda::::iter().count() == 0); - } + // `service_task` when the task is a periodic, non-named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_periodic { + let now = BLOCK_NUMBER.into(); + let task = make_task::(true, false, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_periodic_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `execute_dispatch` when the origin is `Signed`, not counting the dispatchable's weight. + execute_dispatch_signed { + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let origin = make_origin::(true); + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); + } - - on_initialize_periodic { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, false, Some(false))?; - }: { Scheduler::::on_initialize(when); } verify { - assert_eq!(System::::event_count(), s); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } } - on_initialize_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s); - assert!(Agenda::::iter().count() == 0); + // `execute_dispatch` when the origin is not `Signed`, not counting the dispatchable's weight.
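A note on the `make_call` helper these benchmarks rely on: an encoded `remark` call carries a few bytes of overhead on top of its payload (pallet index, call index, and the SCALE length prefix), so whether the bounded form ends up inline cannot be computed directly from the payload length; the helper therefore probes lengths until `lookup_needed()` matches the request. Roughly, with both constants assumed rather than taken from the real code:

```rust
const INLINE_BOUND: usize = 128; // stand-in for `BoundedInline::bound()`
const CALL_OVERHEAD: usize = 4; // indices plus a 2-byte compact prefix near the bound

/// Rough model of the condition `make_call` searches for.
fn needs_lookup(remark_len: usize) -> bool {
    remark_len + CALL_OVERHEAD > INLINE_BOUND
}
```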
+ execute_dispatch_unsigned { + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let origin = make_origin::(false); + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); } - - on_initialize { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } verify { - assert_eq!(System::::event_count(), s); - assert!(Agenda::::iter().count() == 0); } schedule { - let s in 0 .. T::MaxScheduledPerBlock::get(); + let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); - let call = Box::new(CallOrHashOf::::Value(inner_call)); + let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, when, periodic, priority, call) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -224,13 +242,13 @@ benchmarks! { let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; + fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); let schedule_origin = T::ScheduleOrigin::successful_origin(); }: _>(schedule_origin, when, 0) verify { ensure!( - Lookup::::get(0.encode()).is_none(), + Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE @@ -241,18 +259,16 @@ benchmarks! { } schedule_named { - let s in 0 .. T::MaxScheduledPerBlock::get(); - let id = s.encode(); + let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); - let call = Box::new(CallOrHashOf::::Value(inner_call)); + let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, id, when, periodic, priority, call) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, id, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -264,12 +280,11 @@ benchmarks! { let s in 1 .. 
T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, 0.encode()) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, u32_to_name(0)) verify { ensure!( - Lookup::::get(0.encode()).is_none(), + Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 143fa37a9261d..b5ea0deeba9a3 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -52,27 +52,33 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] mod tests; pub mod weights; -use codec::{Codec, Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter}, + dispatch::{ + DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter, RawOrigin, + }, + ensure, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, PrivilegeCmp, StorageVersion, + Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, + PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, }, weights::Weight, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::{self as system}; pub use pallet::*; use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; use sp_runtime::{ traits::{BadOrigin, One, Saturating, Zero}, - RuntimeDebug, + BoundedVec, RuntimeDebug, }; use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; pub use weights::WeightInfo; @@ -96,24 +102,25 @@ struct ScheduledV1 { /// Information regarding an item to be executed in the future. #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] -#[derive(Clone, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct ScheduledV3 { +#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct Scheduled { /// The unique identity for this task, if there is one. - maybe_id: Option>, + maybe_id: Option, /// This task's priority. priority: schedule::Priority, /// The call to be dispatched. call: Call, /// If the call is periodic, then this points to the information concerning that. maybe_periodic: Option>, - /// The origin to dispatch the call. + /// The origin with which to dispatch the call. origin: PalletsOrigin, _phantom: PhantomData, } -use crate::ScheduledV3 as ScheduledV2; +use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2}; -pub type ScheduledV2Of = ScheduledV3< +pub type ScheduledV2Of = ScheduledV2< + Vec, ::RuntimeCall, ::BlockNumber, ::PalletsOrigin, @@ -121,57 +128,54 @@ pub type ScheduledV2Of = ScheduledV3< >; pub type ScheduledV3Of = ScheduledV3< + Vec, CallOrHashOf, ::BlockNumber, ::PalletsOrigin, ::AccountId, >; -pub type ScheduledOf = ScheduledV3Of; - -/// The current version of Scheduled struct. 
-pub type Scheduled = - ScheduledV2; +pub type ScheduledOf = Scheduled< + TaskName, + Bounded<::RuntimeCall>, + ::BlockNumber, + ::PalletsOrigin, + ::AccountId, +>; -#[cfg(feature = "runtime-benchmarks")] -mod preimage_provider { - use frame_support::traits::PreimageRecipient; - pub trait PreimageProviderAndMaybeRecipient: PreimageRecipient {} - impl> PreimageProviderAndMaybeRecipient for T {} +struct WeightCounter { + used: Weight, + limit: Weight, } - -#[cfg(not(feature = "runtime-benchmarks"))] -mod preimage_provider { - use frame_support::traits::PreimageProvider; - pub trait PreimageProviderAndMaybeRecipient: PreimageProvider {} - impl> PreimageProviderAndMaybeRecipient for T {} +impl WeightCounter { + fn check_accrue(&mut self, w: Weight) -> bool { + let test = self.used.saturating_add(w); + if test.any_gt(self.limit) { + false + } else { + self.used = test; + true + } + } + fn can_accrue(&mut self, w: Weight) -> bool { + self.used.saturating_add(w).all_lte(self.limit) + } } -pub use preimage_provider::PreimageProviderAndMaybeRecipient; - pub(crate) trait MarginalWeightInfo: WeightInfo { - fn item(periodic: bool, named: bool, resolved: Option) -> Weight { - match (periodic, named, resolved) { - (_, false, None) => Self::on_initialize_aborted(2) - Self::on_initialize_aborted(1), - (_, true, None) => - Self::on_initialize_named_aborted(2) - Self::on_initialize_named_aborted(1), - (false, false, Some(false)) => Self::on_initialize(2) - Self::on_initialize(1), - (false, true, Some(false)) => - Self::on_initialize_named(2) - Self::on_initialize_named(1), - (true, false, Some(false)) => - Self::on_initialize_periodic(2) - Self::on_initialize_periodic(1), - (true, true, Some(false)) => - Self::on_initialize_periodic_named(2) - Self::on_initialize_periodic_named(1), - (false, false, Some(true)) => - Self::on_initialize_resolved(2) - Self::on_initialize_resolved(1), - (false, true, Some(true)) => - Self::on_initialize_named_resolved(2) - Self::on_initialize_named_resolved(1), - (true, false, Some(true)) => - Self::on_initialize_periodic_resolved(2) - Self::on_initialize_periodic_resolved(1), - (true, true, Some(true)) => - Self::on_initialize_periodic_named_resolved(2) - - Self::on_initialize_periodic_named_resolved(1), + fn service_task(maybe_lookup_len: Option, named: bool, periodic: bool) -> Weight { + let base = Self::service_task_base(); + let mut total = match maybe_lookup_len { + None => base, + Some(l) => Self::service_task_fetched(l as u32), + }; + if named { + total.saturating_accrue(Self::service_task_named().saturating_sub(base)); } + if periodic { + total.saturating_accrue(Self::service_task_periodic().saturating_sub(base)); + } + total } } impl MarginalWeightInfo for T {} @@ -179,11 +183,7 @@ impl MarginalWeightInfo for T {} #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - dispatch::PostDispatchInfo, - pallet_prelude::*, - traits::{schedule::LookupError, PreimageProvider}, - }; + use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; /// The current storage version. @@ -192,7 +192,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::storage_version(STORAGE_VERSION)] - #[pallet::without_storage_info] pub struct Pallet(_); /// `system::Config` should always be included in our implied traits. @@ -207,7 +206,9 @@ pub mod pallet { + IsType<::RuntimeOrigin>; /// The caller origin, overarching type of all pallets origins. 
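`WeightCounter::check_accrue` above commits weight only when it fits, while `can_accrue` merely tests. A single-dimension sketch; the real `Weight` comparisons run over both the ref-time and proof-size components, hence `any_gt`/`all_lte`:

```rust
struct Counter {
    used: u64,
    limit: u64,
}

impl Counter {
    /// Commit `w` if and only if it fits; report whether it was committed.
    fn check_accrue(&mut self, w: u64) -> bool {
        let test = self.used.saturating_add(w);
        if test > self.limit {
            false
        } else {
            self.used = test;
            true
        }
    }
    /// Test without committing.
    fn can_accrue(&self, w: u64) -> bool {
        self.used.saturating_add(w) <= self.limit
    }
}
```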
- type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; + type PalletsOrigin: From> + + CallerTrait + + MaxEncodedLen; /// The aggregated call type. type RuntimeCall: Parameter @@ -217,8 +218,7 @@ pub mod pallet { > + GetDispatchInfo + From>; - /// The maximum weight that may be scheduled per block for any dispatchables of less - /// priority than `schedule::HARD_DEADLINE`. + /// The maximum weight that may be scheduled per block for any dispatchables. #[pallet::constant] type MaximumWeight: Get; @@ -235,7 +235,6 @@ pub mod pallet { type OriginPrivilegeCmp: PrivilegeCmp; /// The maximum number of scheduled calls in the queue for a single block. - /// Not strictly enforced, but used for weight estimation. #[pallet::constant] type MaxScheduledPerBlock: Get; @@ -243,21 +242,29 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The preimage provider with which we look up call hashes to get the call. - type PreimageProvider: PreimageProviderAndMaybeRecipient; - - /// If `Some` then the number of blocks to postpone execution for when the item is delayed. - type NoPreimagePostponement: Get>; + type Preimages: QueryPreimage + StorePreimage; } - /// Items to be executed, indexed by the block number that they should be executed on. #[pallet::storage] - pub type Agenda = - StorageMap<_, Twox64Concat, T::BlockNumber, Vec>>, ValueQuery>; + pub type IncompleteSince = StorageValue<_, T::BlockNumber>; - /// Lookup from identity to the block number and index of the task. + /// Items to be executed, indexed by the block number that they should be executed on. + #[pallet::storage] + pub type Agenda = StorageMap< + _, + Twox64Concat, + T::BlockNumber, + BoundedVec>, T::MaxScheduledPerBlock>, + ValueQuery, + >; + + /// Lookup from a name to the block number and index of the task. + /// + /// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4 + /// identities. #[pallet::storage] pub(crate) type Lookup = - StorageMap<_, Twox64Concat, Vec, TaskAddress>; + StorageMap<_, Twox64Concat, TaskName, TaskAddress>; /// Events type. #[pallet::event] @@ -270,15 +277,15 @@ pub mod pallet { /// Dispatched some task. Dispatched { task: TaskAddress, - id: Option>, + id: Option<[u8; 32]>, result: DispatchResult, }, /// The call for the provided hash was not found so the task has been aborted. - CallLookupFailed { - task: TaskAddress, - id: Option>, - error: LookupError, - }, + CallUnavailable { task: TaskAddress, id: Option<[u8; 32]> }, + /// The given task was unable to be renewed since the agenda is full at that block. + PeriodicFailed { task: TaskAddress, id: Option<[u8; 32]> }, + /// The given task can never be executed since it is overweight. + PermanentlyOverweight { task: TaskAddress, id: Option<[u8; 32]> }, } #[pallet::error] @@ -291,134 +298,18 @@ pub mod pallet { TargetBlockNumberInPast, /// Reschedule failed because it does not change scheduled time. RescheduleNoChange, + /// Attempt to use a non-named function on a named task. + Named, } #[pallet::hooks] impl Hooks> for Pallet { /// Execute the scheduled calls fn on_initialize(now: T::BlockNumber) -> Weight { - let limit = T::MaximumWeight::get(); - - let mut queued = Agenda::::take(now) - .into_iter() - .enumerate() - .filter_map(|(index, s)| Some((index as u32, s?))) - .collect::>(); - - if queued.len() as u32 > T::MaxScheduledPerBlock::get() { - log::warn!( - target: "runtime::scheduler", - "Warning: This block has more items queued in Scheduler than \ - expected from the runtime configuration. An update might be needed." 
- ); - } - - queued.sort_by_key(|(_, s)| s.priority); - - let next = now + One::one(); - - let mut total_weight: Weight = T::WeightInfo::on_initialize(0); - for (order, (index, mut s)) in queued.into_iter().enumerate() { - let named = if let Some(ref id) = s.maybe_id { - Lookup::::remove(id); - true - } else { - false - }; - - let (call, maybe_completed) = s.call.resolved::(); - s.call = call; - - let resolved = if let Some(completed) = maybe_completed { - T::PreimageProvider::unrequest_preimage(&completed); - true - } else { - false - }; - - let call = match s.call.as_value().cloned() { - Some(c) => c, - None => { - // Preimage not available - postpone until some block. - total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); - if let Some(delay) = T::NoPreimagePostponement::get() { - let until = now.saturating_add(delay); - if let Some(ref id) = s.maybe_id { - let index = Agenda::::decode_len(until).unwrap_or(0); - Lookup::::insert(id, (until, index as u32)); - } - Agenda::::append(until, Some(s)); - } - continue - }, - }; - - let periodic = s.maybe_periodic.is_some(); - let call_weight = call.get_dispatch_info().weight; - let mut item_weight = T::WeightInfo::item(periodic, named, Some(resolved)); - let origin = <::RuntimeOrigin as From>::from( - s.origin.clone(), - ) - .into(); - if ensure_signed(origin).is_ok() { - // Weights of Signed dispatches expect their signing account to be whitelisted. - item_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - } - - // We allow a scheduled call if any is true: - // - It's priority is `HARD_DEADLINE` - // - It does not push the weight past the limit. - // - It is the first item in the schedule - let hard_deadline = s.priority <= schedule::HARD_DEADLINE; - let test_weight = - total_weight.saturating_add(call_weight).saturating_add(item_weight); - if !hard_deadline && order > 0 && test_weight.any_gt(limit) { - // Cannot be scheduled this block - postpone until next. - total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); - if let Some(ref id) = s.maybe_id { - // NOTE: We could reasonably not do this (in which case there would be one - // block where the named and delayed item could not be referenced by name), - // but we will do it anyway since it should be mostly free in terms of - // weight and it is slightly cleaner. 
- let index = Agenda::::decode_len(next).unwrap_or(0); - Lookup::::insert(id, (next, index as u32)); - } - Agenda::::append(next, Some(s)); - continue - } - - let dispatch_origin = s.origin.clone().into(); - let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { - Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => - (error_and_info.post_info.actual_weight, Err(error_and_info.error)), - }; - let actual_call_weight = maybe_actual_call_weight.unwrap_or(call_weight); - total_weight.saturating_accrue(item_weight); - total_weight.saturating_accrue(actual_call_weight); - - Self::deposit_event(Event::Dispatched { - task: (now, index), - id: s.maybe_id.clone(), - result, - }); - - if let &Some((period, count)) = &s.maybe_periodic { - if count > 1 { - s.maybe_periodic = Some((period, count - 1)); - } else { - s.maybe_periodic = None; - } - let wake = now + period; - // If scheduled is named, place its information in `Lookup` - if let Some(ref id) = s.maybe_id { - let wake_index = Agenda::::decode_len(wake).unwrap_or(0); - Lookup::::insert(id, (wake, wake_index as u32)); - } - Agenda::::append(wake, Some(s)); - } - } - total_weight + let mut weight_counter = + WeightCounter { used: Weight::zero(), limit: T::MaximumWeight::get() }; + Self::service_agendas(&mut weight_counter, now, u32::max_value()); + weight_counter.used } } @@ -431,7 +322,7 @@ pub mod pallet { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -440,7 +331,7 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } @@ -458,11 +349,11 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named( origin: OriginFor, - id: Vec, + id: TaskName, when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -472,14 +363,14 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } /// Cancel a named scheduled task. 
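Each `schedule*` dispatchable now takes the call by value and immediately runs it through `T::Preimages::bound`, which either inlines a small call into the agenda or stores it and keeps only a hash reference. A toy model of that decision; `ToyBounded` and `INLINE_LIMIT` are invented for illustration, and the real `Bounded` type in `frame_support::traits` also carries a `Legacy` hash-only variant used by the migration below:

use sp_io::hashing::blake2_256;

/// Toy stand-in for `frame_support::traits::Bounded<Call>`.
enum ToyBounded {
    /// Small enough to keep in the agenda itself.
    Inline(Vec<u8>),
    /// Too large: only hash and length stay in the agenda; the body goes to
    /// the preimage store and must be fetched again before dispatch.
    Lookup { hash: [u8; 32], len: u32 },
}

impl ToyBounded {
    /// Mirrors the spirit of `Bounded::lookup_needed`.
    fn lookup_needed(&self) -> bool {
        matches!(self, ToyBounded::Lookup { .. })
    }
}

/// Invented threshold; the real cut-off depends on the bounded-encoding limit.
const INLINE_LIMIT: usize = 128;

fn bound(encoded_call: Vec<u8>) -> ToyBounded {
    if encoded_call.len() <= INLINE_LIMIT {
        ToyBounded::Inline(encoded_call)
    } else {
        let hash = blake2_256(&encoded_call);
        ToyBounded::Lookup { hash, len: encoded_call.len() as u32 }
    }
}

fn main() {
    assert!(!bound(vec![0u8; 16]).lookup_needed()); // small: inlined
    assert!(bound(vec![0u8; 4096]).lookup_needed()); // large: stored by hash
}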
#[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] - pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { + pub fn cancel_named(origin: OriginFor, id: TaskName) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; @@ -497,7 +388,7 @@ pub mod pallet { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -506,7 +397,7 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } @@ -519,11 +410,11 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named_after( origin: OriginFor, - id: Vec, + id: TaskName, after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -533,41 +424,70 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } } } -impl Pallet { - /// Migrate storage format from V1 to V3. +impl> Pallet { + /// Migrate storage format from V1 to V4. /// /// Returns the weight consumed by this migration. - pub fn migrate_v1_to_v3() -> Weight { + pub fn migrate_v1_to_v4() -> Weight { + use migration::v1 as old; let mut weight = T::DbWeight::get().reads_writes(1, 1); + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. + let keys = old::Agenda::::iter_keys().collect::>(); + for key in keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&key) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&key); + log::warn!("Deleted undecodable agenda"); + } + } + Agenda::::translate::< Vec::RuntimeCall, T::BlockNumber>>>, _, >(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule.map(|schedule| ScheduledV3 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call.into(), - maybe_periodic: schedule.maybe_periodic, - origin: system::RawOrigin::Root.into(), - _phantom: Default::default(), + schedule.and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + let call = T::Preimages::bound(schedule.call).ok()?; + + if call.lookup_needed() { + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), + }) }) }) .collect::>(), - ) + )) }); #[allow(deprecated)] @@ -577,34 +497,62 @@ impl Pallet { &[], ); - StorageVersion::new(3).put::(); + StorageVersion::new(4).put::(); weight + T::DbWeight::get().writes(2) } - /// Migrate storage format from V2 to V3. 
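`migrate_v1_to_v4` above (and the v2/v3 variants below) is deliberately two-pass: it first walks `iter_keys` and `try_get`s every agenda, deleting the ones that no longer decode, because `StorageMap::translate` silently skips undecodable values and would leave their keys behind. The same shape over a plain `HashMap`, with a trivial little-endian codec standing in for SCALE:

use std::collections::HashMap;

fn migrate(storage: &mut HashMap<u64, Vec<u8>>) {
    // Pass 1: collect keys first (as the migration does with `iter_keys`),
    // since removing entries while iterating the map is not allowed.
    let keys: Vec<u64> = storage.keys().copied().collect();
    for key in keys {
        if decode_old(&storage[&key]).is_err() {
            // `translate` alone would leave this key behind.
            storage.remove(&key);
        }
    }
    // Pass 2: translate every remaining (decodable) value to the new format.
    for value in storage.values_mut() {
        let old = decode_old(value).expect("undecodable values were removed");
        *value = encode_new(old + 1); // stand-in for the real schedule mapping
    }
}

fn decode_old(v: &[u8]) -> Result<u64, ()> {
    v.try_into().map(u64::from_le_bytes).map_err(|_| ())
}
fn encode_new(x: u64) -> Vec<u8> {
    x.to_le_bytes().to_vec()
}

fn main() {
    let mut storage = HashMap::new();
    storage.insert(1, 7u64.to_le_bytes().to_vec());
    storage.insert(2, vec![0xff; 3]); // undecodable under the "new" codec
    migrate(&mut storage);
    assert_eq!(storage.len(), 1); // the broken entry is gone, key included
    assert_eq!(decode_old(&storage[&1]), Ok(8));
}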
+ /// Migrate storage format from V2 to V4. /// /// Returns the weight consumed by this migration. - pub fn migrate_v2_to_v3() -> Weight { + pub fn migrate_v2_to_v4() -> Weight { + use migration::v2 as old; let mut weight = T::DbWeight::get().reads_writes(1, 1); + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. + let keys = old::Agenda::::iter_keys().collect::>(); + for key in keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&key) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&key); + log::warn!("Deleted undecodable agenda"); + } + } + Agenda::::translate::>>, _>(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule.map(|schedule| ScheduledV3 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call.into(), - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin, - _phantom: Default::default(), + schedule.and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + let call = T::Preimages::bound(schedule.call).ok()?; + if call.lookup_needed() { + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin, + _phantom: Default::default(), + }) }) }) .collect::>(), - ) + )) }); #[allow(deprecated)] @@ -614,34 +562,140 @@ impl Pallet { &[], ); - StorageVersion::new(3).put::(); + StorageVersion::new(4).put::(); weight + T::DbWeight::get().writes(2) } - #[cfg(feature = "try-runtime")] - pub fn pre_migrate_to_v3() -> Result<(), &'static str> { - Ok(()) - } + /// Migrate storage format from V3 to V4. + /// + /// Returns the weight consumed by this migration. + #[allow(deprecated)] + pub fn migrate_v3_to_v4() -> Weight { + use migration::v3 as old; + let mut weight = T::DbWeight::get().reads_writes(2, 1); + + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. 
+ let blocks = old::Agenda::::iter_keys().collect::>(); + for block in blocks { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&block) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&block); + log::warn!("Deleted undecodable agenda of block: {:?}", block); + } + } - #[cfg(feature = "try-runtime")] - pub fn post_migrate_to_v3() -> Result<(), &'static str> { - use frame_support::dispatch::GetStorageVersion; + Agenda::::translate::>>, _>(|block, agenda| { + log::info!("Migrating agenda of block: {:?}", &block); + Some(BoundedVec::truncate_from( + agenda + .into_iter() + .map(|schedule| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + schedule + .and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + log::info!("Migrated name for id: {:?}", id); + } else { + log::error!("No name in Lookup for id: {:?}", &id); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } else { + log::info!("Schedule is unnamed"); + } + + let call = match schedule.call { + MaybeHashed::Hash(h) => { + let bounded = Bounded::from_legacy_hash(h); + // Check that the call can be decoded in the new runtime. + if let Err(err) = T::Preimages::peek::< + ::RuntimeCall, + >(&bounded) + { + log::error!( + "Dropping undecodable call {}: {:?}", + &h, + &err + ); + return None + } + weight.saturating_accrue(T::DbWeight::get().reads(1)); + log::info!("Migrated call by hash, hash: {:?}", h); + bounded + }, + MaybeHashed::Value(v) => { + let call = T::Preimages::bound(v) + .map_err(|e| { + log::error!("Could not bound Call: {:?}", e) + }) + .ok()?; + if call.lookup_needed() { + weight.saturating_accrue( + T::DbWeight::get().reads_writes(0, 1), + ); + } + log::info!( + "Migrated call by value, hash: {:?}", + call.hash() + ); + call + }, + }; + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin, + _phantom: Default::default(), + }) + }) + .or_else(|| { + log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block); + None + }) + }) + .collect::>(), + )) + }); - assert!(Self::current_storage_version() == 3); - for k in Agenda::::iter_keys() { - let _ = Agenda::::try_get(k).map_err(|()| "Invalid item in Agenda")?; - } - Ok(()) + #[allow(deprecated)] + frame_support::storage::migration::remove_storage_prefix( + Self::name().as_bytes(), + b"StorageVersion", + &[], + ); + + StorageVersion::new(4).put::(); + + weight + T::DbWeight::get().writes(2) } +} +impl Pallet { /// Helper to migrate scheduler when the pallet origin type has changed. 
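`migrate_v3_to_v4` above must handle two legacy call encodings: a bare hash is kept as a legacy bounded lookup, but only after peeking that its preimage still decodes under the new runtime, while an inline value is re-bounded; in either case a failure drops the task. The branch structure in miniature, with toy types standing in for the real `MaybeHashed` and `Bounded`:

/// Toy stand-ins for the legacy and new call representations.
enum ToyMaybeHashed {
    Value(Vec<u8>),
    Hash([u8; 32]),
}

enum ToyBounded {
    Inline(Vec<u8>),
    Legacy([u8; 32]),
}

const INLINE_LIMIT: usize = 128; // invented, as in the sketch further up

/// `decodes` stands in for `T::Preimages::peek` succeeding.
fn migrate_call(old: ToyMaybeHashed, decodes: bool) -> Option<ToyBounded> {
    match old {
        // Bare hash: keep it as a legacy lookup, but drop the whole task if
        // the stored preimage can no longer be decoded.
        ToyMaybeHashed::Hash(h) if decodes => Some(ToyBounded::Legacy(h)),
        ToyMaybeHashed::Hash(_) => None,
        // Inline value: re-bound it; an oversized call fails `bound` and the
        // task is dropped (the `.ok()?` in the real migration).
        ToyMaybeHashed::Value(v) if v.len() <= INLINE_LIMIT =>
            Some(ToyBounded::Inline(v)),
        ToyMaybeHashed::Value(_) => None,
    }
}

fn main() {
    assert!(migrate_call(ToyMaybeHashed::Hash([1; 32]), true).is_some());
    assert!(migrate_call(ToyMaybeHashed::Hash([1; 32]), false).is_none());
    assert!(migrate_call(ToyMaybeHashed::Value(vec![0; 16]), true).is_some());
    assert!(migrate_call(ToyMaybeHashed::Value(vec![0; 4096]), true).is_none());
}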
pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec, T::BlockNumber, OldOrigin, T::AccountId>>>, + Vec< + Option< + Scheduled< + TaskName, + Bounded<::RuntimeCall>, + T::BlockNumber, + OldOrigin, + T::AccountId, + >, + >, + >, _, >(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { @@ -655,7 +709,7 @@ impl Pallet { }) }) .collect::>(), - ) + )) }); } @@ -676,34 +730,64 @@ impl Pallet { Ok(when) } + fn place_task( + when: T::BlockNumber, + what: ScheduledOf, + ) -> Result, (DispatchError, ScheduledOf)> { + let maybe_name = what.maybe_id; + let index = Self::push_to_agenda(when, what)?; + let address = (when, index); + if let Some(name) = maybe_name { + Lookup::::insert(name, address) + } + Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 }); + Ok(address) + } + + fn push_to_agenda( + when: T::BlockNumber, + what: ScheduledOf, + ) -> Result)> { + let mut agenda = Agenda::::get(when); + let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() { + // will always succeed due to the above check. + let _ = agenda.try_push(Some(what)); + agenda.len() as u32 - 1 + } else { + if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) { + agenda[hole_index] = Some(what); + hole_index as u32 + } else { + return Err((DispatchError::Exhausted, what)) + } + }; + Agenda::::insert(when, agenda); + Ok(index) + } + fn do_schedule( when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: CallOrHashOf, + call: Bounded<::RuntimeCall>, ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; - call.ensure_requested::(); // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); - let s = Some(Scheduled { + let task = Scheduled { maybe_id: None, priority, call, maybe_periodic, origin, - _phantom: PhantomData::::default(), - }); - Agenda::::append(when, s); - let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; - Self::deposit_event(Event::Scheduled { when, index }); - - Ok((when, index)) + _phantom: PhantomData, + }; + Self::place_task(when, task).map_err(|x| x.0) } fn do_cancel( @@ -713,7 +797,7 @@ impl Pallet { let scheduled = Agenda::::try_mutate(when, |agenda| { agenda.get_mut(index as usize).map_or( Ok(None), - |s| -> Result>, DispatchError> { + |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if matches!( T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), @@ -727,7 +811,7 @@ impl Pallet { ) })?; if let Some(s) = scheduled { - s.call.ensure_unrequested::(); + T::Preimages::drop(&s.call); if let Some(id) = s.maybe_id { Lookup::::remove(id); } @@ -748,27 +832,23 @@ impl Pallet { return Err(Error::::RescheduleNoChange.into()) } - Agenda::::try_mutate(when, |agenda| -> DispatchResult { + let task = Agenda::::try_mutate(when, |agenda| { let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - let task = task.take().ok_or(Error::::NotFound)?; - Agenda::::append(new_time, Some(task)); - Ok(()) + ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. 
})), Error::::Named); + task.take().ok_or(Error::::NotFound) })?; - - let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; Self::deposit_event(Event::Canceled { when, index }); - Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); - Ok((new_time, new_index)) + Self::place_task(new_time, task).map_err(|x| x.0) } fn do_schedule_named( - id: Vec, + id: TaskName, when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: CallOrHashOf, + call: Bounded<::RuntimeCall>, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -777,32 +857,24 @@ impl Pallet { let when = Self::resolve_time(when)?; - call.ensure_requested::(); - // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); - let s = Scheduled { - maybe_id: Some(id.clone()), + let task = Scheduled { + maybe_id: Some(id), priority, call, maybe_periodic, origin, _phantom: Default::default(), }; - Agenda::::append(when, Some(s)); - let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; - let address = (when, index); - Lookup::::insert(&id, &address); - Self::deposit_event(Event::Scheduled { when, index }); - - Ok(address) + Self::place_task(when, task).map_err(|x| x.0) } - fn do_cancel_named(origin: Option, id: Vec) -> DispatchResult { + fn do_cancel_named(origin: Option, id: TaskName) -> DispatchResult { Lookup::::try_mutate_exists(id, |lookup| -> DispatchResult { if let Some((when, index)) = lookup.take() { let i = index as usize; @@ -815,7 +887,7 @@ impl Pallet { ) { return Err(BadOrigin.into()) } - s.call.ensure_unrequested::(); + T::Preimages::drop(&s.call); } *s = None; } @@ -830,42 +902,245 @@ impl Pallet { } fn do_reschedule_named( - id: Vec, + id: TaskName, new_time: DispatchTime, ) -> Result, DispatchError> { let new_time = Self::resolve_time(new_time)?; - Lookup::::try_mutate_exists( - id, - |lookup| -> Result, DispatchError> { - let (when, index) = lookup.ok_or(Error::::NotFound)?; + let lookup = Lookup::::get(id); + let (when, index) = lookup.ok_or(Error::::NotFound)?; - if new_time == when { - return Err(Error::::RescheduleNoChange.into()) - } + if new_time == when { + return Err(Error::::RescheduleNoChange.into()) + } - Agenda::::try_mutate(when, |agenda| -> DispatchResult { - let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - let task = task.take().ok_or(Error::::NotFound)?; - Agenda::::append(new_time, Some(task)); + let task = Agenda::::try_mutate(when, |agenda| { + let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; + task.take().ok_or(Error::::NotFound) + })?; + Self::deposit_event(Event::Canceled { when, index }); + Self::place_task(new_time, task).map_err(|x| x.0) + } +} - Ok(()) - })?; +enum ServiceTaskError { + /// Could not be executed due to missing preimage. + Unavailable, + /// Could not be executed due to weight limitations. + Overweight, +} +use ServiceTaskError::*; - let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; - Self::deposit_event(Event::Canceled { when, index }); - Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); +impl Pallet { + /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. 
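`push_to_agenda` (used by `place_task` further up) appends while the bounded agenda still has spare capacity and otherwise recycles a `None` hole left behind by a cancelled task; only a genuinely full agenda surfaces `Exhausted`. The slot-reuse logic over a plain `Vec<Option<T>>`:

fn push_to_agenda<T>(
    agenda: &mut Vec<Option<T>>,
    max: usize,
    what: T,
) -> Result<u32, T> {
    if agenda.len() < max {
        agenda.push(Some(what));
        Ok(agenda.len() as u32 - 1)
    } else if let Some(hole) = agenda.iter().position(|slot| slot.is_none()) {
        // Reuse a slot vacated by a cancelled or already-executed task.
        agenda[hole] = Some(what);
        Ok(hole as u32)
    } else {
        Err(what) // genuinely full: the caller surfaces `Exhausted`
    }
}

fn main() {
    let mut agenda: Vec<Option<&str>> = vec![Some("a"), None, Some("c")];
    // At capacity 3 the vector is "full", but index 1 is a reusable hole.
    assert_eq!(push_to_agenda(&mut agenda, 3, "b"), Ok(1));
    // Now there are no holes left, so the next push fails.
    assert!(push_to_agenda(&mut agenda, 3, "d").is_err());
}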
+ fn service_agendas(weight: &mut WeightCounter, now: T::BlockNumber, max: u32) { + if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { + return + } + + let mut incomplete_since = now + One::one(); + let mut when = IncompleteSince::::take().unwrap_or(now); + let mut executed = 0; + + let max_items = T::MaxScheduledPerBlock::get(); + let mut count_down = max; + let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); + while count_down > 0 && when <= now && weight.can_accrue(service_agenda_base_weight) { + if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { + incomplete_since = incomplete_since.min(when); + } + when.saturating_inc(); + count_down.saturating_dec(); + } + incomplete_since = incomplete_since.min(when); + if incomplete_since <= now { + IncompleteSince::::put(incomplete_since); + } + } + + /// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a + /// later block. + fn service_agenda( + weight: &mut WeightCounter, + executed: &mut u32, + now: T::BlockNumber, + when: T::BlockNumber, + max: u32, + ) -> bool { + let mut agenda = Agenda::::get(when); + let mut ordered = agenda + .iter() + .enumerate() + .filter_map(|(index, maybe_item)| { + maybe_item.as_ref().map(|item| (index as u32, item.priority)) + }) + .collect::>(); + ordered.sort_by_key(|k| k.1); + let within_limit = + weight.check_accrue(T::WeightInfo::service_agenda_base(ordered.len() as u32)); + debug_assert!(within_limit, "weight limit should have been checked in advance"); + + // Items which we know can be executed and have postponed for execution in a later block. + let mut postponed = (ordered.len() as u32).saturating_sub(max); + // Items which we don't know can ever be executed. + let mut dropped = 0; + + for (agenda_index, _) in ordered.into_iter().take(max as usize) { + let task = match agenda[agenda_index as usize].take() { + None => continue, + Some(t) => t, + }; + let base_weight = T::WeightInfo::service_task( + task.call.lookup_len().map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + ); + if !weight.can_accrue(base_weight) { + postponed += 1; + break + } + let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); + agenda[agenda_index as usize] = match result { + Err((Unavailable, slot)) => { + dropped += 1; + slot + }, + Err((Overweight, slot)) => { + postponed += 1; + slot + }, + Ok(()) => { + *executed += 1; + None + }, + }; + } + if postponed > 0 || dropped > 0 { + Agenda::::insert(when, agenda); + } else { + Agenda::::remove(when); + } + postponed == 0 + } - *lookup = Some((new_time, new_index)); + /// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter. + /// + /// This involves: + /// - removing and potentially replacing the `Lookup` entry for the task. + /// - realizing the task's call which can include a preimage lookup. + /// - Rescheduling the task for execution in a later agenda if periodic. 
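`service_agendas` above guarantees that running out of weight never silently skips a block's agenda: the earliest block that was not fully serviced is parked in `IncompleteSince` and retried on the next block. The bookkeeping reduced to plain integers, with a closure standing in for `service_agenda`:

fn service_agendas(
    incomplete_since: &mut Option<u64>,
    now: u64,
    mut budget: u32,
    mut service_one: impl FnMut(u64) -> bool, // true = agenda fully done
) {
    let mut earliest_incomplete = now + 1;
    // Resume from where a previous block left off, if anywhere.
    let mut when = incomplete_since.take().unwrap_or(now);
    while when <= now && budget > 0 {
        if !service_one(when) {
            earliest_incomplete = earliest_incomplete.min(when);
        }
        when += 1;
        budget -= 1;
    }
    // Blocks never visited at all also count as incomplete.
    earliest_incomplete = earliest_incomplete.min(when);
    if earliest_incomplete <= now {
        *incomplete_since = Some(earliest_incomplete);
    }
}

fn main() {
    let mut since = Some(3); // blocks 3..=5 still owe work at block 5
    // Budget of 2: only blocks 3 and 4 are visited; 4 does not finish.
    service_agendas(&mut since, 5, 2, |when| when != 4);
    assert_eq!(since, Some(4)); // servicing picks up at block 4 next time
}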
+ fn service_task( + weight: &mut WeightCounter, + now: T::BlockNumber, + when: T::BlockNumber, + agenda_index: u32, + is_first: bool, + mut task: ScheduledOf, + ) -> Result<(), (ServiceTaskError, Option>)> { + if let Some(ref id) = task.maybe_id { + Lookup::::remove(id); + } - Ok((new_time, new_index)) + let (call, lookup_len) = match T::Preimages::peek(&task.call) { + Ok(c) => c, + Err(_) => return Err((Unavailable, Some(task))), + }; + + weight.check_accrue(T::WeightInfo::service_task( + lookup_len.map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + )); + + match Self::execute_dispatch(weight, task.origin.clone(), call) { + Err(Unavailable) => { + debug_assert!(false, "Checked to exist with `peek`"); + Self::deposit_event(Event::CallUnavailable { + task: (when, agenda_index), + id: task.maybe_id, + }); + Err((Unavailable, Some(task))) + }, + Err(Overweight) if is_first => { + T::Preimages::drop(&task.call); + Self::deposit_event(Event::PermanentlyOverweight { + task: (when, agenda_index), + id: task.maybe_id, + }); + Err((Unavailable, Some(task))) + }, + Err(Overweight) => Err((Overweight, Some(task))), + Ok(result) => { + Self::deposit_event(Event::Dispatched { + task: (when, agenda_index), + id: task.maybe_id, + result, + }); + if let &Some((period, count)) = &task.maybe_periodic { + if count > 1 { + task.maybe_periodic = Some((period, count - 1)); + } else { + task.maybe_periodic = None; + } + let wake = now.saturating_add(period); + match Self::place_task(wake, task) { + Ok(_) => {}, + Err((_, task)) => { + // TODO: Leave task in storage somewhere for it to be rescheduled + // manually. + T::Preimages::drop(&task.call); + Self::deposit_event(Event::PeriodicFailed { + task: (when, agenda_index), + id: task.maybe_id, + }); + }, + } + } else { + T::Preimages::drop(&task.call); + } + Ok(()) }, - ) + } + } + + /// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight` + /// counter does not exceed its limit and that it is counted accurately (e.g. accounted using + /// post info if available). + /// + /// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the + /// call itself). + fn execute_dispatch( + weight: &mut WeightCounter, + origin: T::PalletsOrigin, + call: ::RuntimeCall, + ) -> Result { + let base_weight = match origin.as_system_ref() { + Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(), + _ => T::WeightInfo::execute_dispatch_unsigned(), + }; + let call_weight = call.get_dispatch_info().weight; + // We only allow a scheduled call if it cannot push the weight past the limit. 
+ let max_weight = base_weight.saturating_add(call_weight); + + if !weight.can_accrue(max_weight) { + return Err(Overweight) + } + + let dispatch_origin = origin.into(); + let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { + Ok(post_info) => (post_info.actual_weight, Ok(())), + Err(error_and_info) => + (error_and_info.post_info.actual_weight, Err(error_and_info.error)), + }; + let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); + weight.check_accrue(base_weight); + weight.check_accrue(call_weight); + Ok(result) } } -impl schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> - for Pallet +impl> + schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -877,6 +1152,8 @@ impl schedule::v2::Anon::RuntimeCall, T origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { + let call = call.as_value().ok_or(DispatchError::CannotLookup)?; + let call = T::Preimages::bound(call)?.transmute(); Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -896,8 +1173,8 @@ impl schedule::v2::Anon::RuntimeCall, T } } -impl schedule::v2::Named::RuntimeCall, T::PalletsOrigin> - for Pallet +impl> + schedule::v2::Named::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -910,23 +1187,108 @@ impl schedule::v2::Named::RuntimeCall, origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { - Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) + let call = call.as_value().ok_or(())?; + let call = T::Preimages::bound(call).map_err(|_| ())?.transmute(); + let name = blake2_256(&id[..]); + Self::do_schedule_named(name, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } fn cancel_named(id: Vec) -> Result<(), ()> { - Self::do_cancel_named(None, id).map_err(|_| ()) + let name = blake2_256(&id[..]); + Self::do_cancel_named(None, name).map_err(|_| ()) } fn reschedule_named( id: Vec, when: DispatchTime, ) -> Result { - Self::do_reschedule_named(id, when) + let name = blake2_256(&id[..]); + Self::do_reschedule_named(name, when) } fn next_dispatch_time(id: Vec) -> Result { - Lookup::::get(id) + let name = blake2_256(&id[..]); + Lookup::::get(name) .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) .ok_or(()) } } + +impl schedule::v3::Anon::RuntimeCall, T::PalletsOrigin> + for Pallet +{ + type Address = TaskAddress; + + fn schedule( + when: DispatchTime, + maybe_periodic: Option>, + priority: schedule::Priority, + origin: T::PalletsOrigin, + call: Bounded<::RuntimeCall>, + ) -> Result { + Self::do_schedule(when, maybe_periodic, priority, origin, call) + } + + fn cancel((when, index): Self::Address) -> Result<(), DispatchError> { + Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::) + } + + fn reschedule( + address: Self::Address, + when: DispatchTime, + ) -> Result { + Self::do_reschedule(address, when).map_err(map_err_to_v3_err::) + } + + fn next_dispatch_time((when, index): Self::Address) -> Result { + Agenda::::get(when) + .get(index as usize) + .ok_or(DispatchError::Unavailable) + .map(|_| when) + } +} + +use schedule::v3::TaskName; + +impl schedule::v3::Named::RuntimeCall, T::PalletsOrigin> + for Pallet +{ + type Address = TaskAddress; + + fn schedule_named( + id: TaskName, + when: DispatchTime, + maybe_periodic: Option>, + priority: schedule::Priority, + origin: T::PalletsOrigin, + call: Bounded<::RuntimeCall>, + ) -> Result { + Self::do_schedule_named(id, when, 
maybe_periodic, priority, origin, call) + } + + fn cancel_named(id: TaskName) -> Result<(), DispatchError> { + Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::) + } + + fn reschedule_named( + id: TaskName, + when: DispatchTime, + ) -> Result { + Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::) + } + + fn next_dispatch_time(id: TaskName) -> Result { + Lookup::::get(id) + .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) + .ok_or(DispatchError::Unavailable) + } +} + +/// Maps a pallet error to an `schedule::v3` error. +fn map_err_to_v3_err(err: DispatchError) -> DispatchError { + if err == DispatchError::from(Error::::NotFound) { + DispatchError::Unavailable + } else { + err + } +} diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs new file mode 100644 index 0000000000000..6769d20023196 --- /dev/null +++ b/frame/scheduler/src/migration.rs @@ -0,0 +1,402 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations for the scheduler pallet. + +use super::*; +use frame_support::traits::OnRuntimeUpgrade; + +/// The log target. +const TARGET: &'static str = "runtime::scheduler::migration"; + +pub mod v1 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec< + Option< + ScheduledV1<::RuntimeCall, ::BlockNumber>, + >, + >, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; +} + +pub mod v2 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec>>, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; +} + +pub mod v3 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec>>, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; + + /// Migrate the scheduler pallet from V3 to V4. 
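The `MigrateToV4` implementation that follows is gated on the storage version, the usual shape for an `OnRuntimeUpgrade`: do nothing (beyond one read) unless leaving exactly version 3, and stamp version 4 on success. That gate in miniature:

/// Version-gating pattern used by `MigrateToV4` below, reduced to a `u32`.
fn on_runtime_upgrade(stored_version: &mut u32) -> &'static str {
    if *stored_version != 3 {
        // Executed on the wrong version: bail out cheaply.
        return "skipped: wrong storage version"
    }
    // ... translate Agenda / Lookup here ...
    *stored_version = 4;
    "migrated v3 -> v4"
}

fn main() {
    let mut version = 3;
    assert_eq!(on_runtime_upgrade(&mut version), "migrated v3 -> v4");
    // Running the same migration twice is harmless: the gate rejects it.
    assert_eq!(on_runtime_upgrade(&mut version), "skipped: wrong storage version");
}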
+ pub struct MigrateToV4(sp_std::marker::PhantomData); + + impl> OnRuntimeUpgrade for MigrateToV4 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 3, "Can only upgrade from version 3"); + + let agendas = Agenda::::iter_keys().count() as u32; + let decodable_agendas = Agenda::::iter_values().count() as u32; + if agendas != decodable_agendas { + // This is not necessarily an error, but can happen when there are Calls + // in an Agenda that are not valid anymore with the new runtime. + log::error!( + target: TARGET, + "Can only decode {} of {} agendas - others will be dropped", + decodable_agendas, + agendas + ); + } + log::info!(target: TARGET, "Trying to migrate {} agendas...", decodable_agendas); + + // Check that no agenda overflows `MaxScheduledPerBlock`. + let max_scheduled_per_block = T::MaxScheduledPerBlock::get() as usize; + for (block_number, agenda) in Agenda::::iter() { + if agenda.iter().cloned().filter_map(|s| s).count() > max_scheduled_per_block { + log::error!( + target: TARGET, + "Would truncate agenda of block {:?} from {} items to {} items.", + block_number, + agenda.len(), + max_scheduled_per_block, + ); + return Err("Agenda would overflow `MaxScheduledPerBlock`.") + } + } + // Check that bounding the calls will not overflow `MAX_LENGTH`. + let max_length = T::Preimages::MAX_LENGTH as usize; + for (block_number, agenda) in Agenda::::iter() { + for schedule in agenda.iter().cloned().filter_map(|s| s) { + match schedule.call { + frame_support::traits::schedule::MaybeHashed::Value(call) => { + let l = call.using_encoded(|c| c.len()); + if l > max_length { + log::error!( + target: TARGET, + "Call in agenda of block {:?} is too large: {} byte", + block_number, + l, + ); + return Err("Call is too large.") + } + }, + _ => (), + } + } + } + + Ok((decodable_agendas as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let version = StorageVersion::get::>(); + if version != 3 { + log::warn!( + target: TARGET, + "skipping v3 to v4 migration: executed on wrong storage version.\ + Expected version 3, found {:?}", + version, + ); + return T::DbWeight::get().reads(1) + } + + crate::Pallet::::migrate_v3_to_v4() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + assert_eq!(StorageVersion::get::>(), 4, "Must upgrade"); + + // Check that everything decoded fine. + for k in crate::Agenda::::iter_keys() { + assert!(crate::Agenda::::try_get(k).is_ok(), "Cannot decode V4 Agenda"); + } + + let old_agendas: u32 = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_agendas = crate::Agenda::::iter_keys().count() as u32; + if old_agendas != new_agendas { + // This is not necessarily an error, but can happen when there are Calls + // in an Agenda that are not valid anymore in the new runtime. + log::error!( + target: TARGET, + "Did not migrate all Agendas. Previous {}, Now {}", + old_agendas, + new_agendas, + ); + } else { + log::info!(target: TARGET, "Migrated {} agendas.", new_agendas); + } + + Ok(()) + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::mock::*; + use frame_support::Hashable; + use sp_std::borrow::Cow; + use substrate_test_utils::assert_eq_uvec; + + #[test] + #[allow(deprecated)] + fn migration_v3_to_v4_works() { + new_test_ext().execute_with(|| { + // Assume that we are at V3. + StorageVersion::new(3).put::(); + + // Call that will be bounded to a `Lookup`. 
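The try-runtime hooks above follow a conservation-check pattern: `pre_upgrade` encodes the number of decodable agendas and hands it forward, and `post_upgrade` compares it with what survived. In miniature, with `to_le_bytes` as a stand-in codec (the real hooks use SCALE), and with the caveat that the real `post_upgrade` only logs a mismatch, since dropping undecodable calls is expected:

fn pre_upgrade(decodable_agendas: u32) -> Vec<u8> {
    // State handed to `post_upgrade` across the upgrade.
    decodable_agendas.to_le_bytes().to_vec()
}

fn post_upgrade(state: Vec<u8>, migrated: u32) -> Result<(), &'static str> {
    let before = u32::from_le_bytes(state.try_into().map_err(|_| "bad state")?);
    if before != migrated {
        // The real hook only logs this case; the sketch surfaces it as an
        // error to make the check visible.
        return Err("did not migrate all agendas")
    }
    Ok(())
}

fn main() {
    let state = pre_upgrade(2);
    assert!(post_upgrade(state.clone(), 2).is_ok());
    assert!(post_upgrade(state, 1).is_err());
}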
+ let large_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1024] }); + // Call that can be inlined. + let small_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 10] }); + // Call that is already hashed and can will be converted to `Legacy`. + let hashed_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 2048] }); + let bound_hashed_call = Preimage::bound(hashed_call.clone()).unwrap(); + assert!(bound_hashed_call.lookup_needed()); + // A Call by hash that will fail to decode becomes `None`. + let trash_data = vec![255u8; 1024]; + let undecodable_hash = Preimage::note(Cow::Borrowed(&trash_data)).unwrap(); + + for i in 0..2u64 { + let k = i.twox_64_concat(); + let old = vec![ + Some(ScheduledV3Of:: { + maybe_id: None, + priority: i as u8 + 10, + call: small_call.clone().into(), + maybe_periodic: None, // 1 + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV3Of:: { + maybe_id: Some(vec![i as u8; 32]), + priority: 123, + call: large_call.clone().into(), + maybe_periodic: Some((4u64, 20)), + origin: signed(i), + _phantom: PhantomData::::default(), + }), + Some(ScheduledV3Of:: { + maybe_id: Some(vec![255 - i as u8; 320]), + priority: 123, + call: MaybeHashed::Hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(i), + _phantom: PhantomData::::default(), + }), + Some(ScheduledV3Of:: { + maybe_id: Some(vec![i as u8; 320]), + priority: 123, + call: MaybeHashed::Hash(undecodable_hash.clone()), + maybe_periodic: Some((4u64, 20)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ]; + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); + } + + let state = v3::MigrateToV4::::pre_upgrade().unwrap(); + let _w = v3::MigrateToV4::::on_runtime_upgrade(); + v3::MigrateToV4::::post_upgrade(state).unwrap(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + + let bound_large_call = Preimage::bound(large_call).unwrap(); + assert!(bound_large_call.lookup_needed()); + let bound_small_call = Preimage::bound(small_call).unwrap(); + assert!(!bound_small_call.lookup_needed()); + + let expected = vec![ + ( + 0, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 10, + call: bound_small_call.clone(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[0u8; 32])), + priority: 123, + call: bound_large_call.clone(), + maybe_periodic: Some((4u64, 20)), + origin: signed(0), + _phantom: PhantomData::::default(), + }), + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[255u8; 320])), + priority: 123, + call: Bounded::from_legacy_hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(0), + _phantom: PhantomData::::default(), + }), + None, + ], + ), + ( + 1, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 11, + call: bound_small_call.clone(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[1u8; 32])), + priority: 123, + call: bound_large_call.clone(), + maybe_periodic: Some((4u64, 20)), + origin: signed(1), + _phantom: PhantomData::::default(), + }), + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[254u8; 320])), + priority: 123, + call: Bounded::from_legacy_hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(1), 
+ _phantom: PhantomData::::default(), + }), + None, + ], + ), + ]; + for (outer, (i, j)) in x.iter().zip(expected.iter()).enumerate() { + assert_eq!(i.0, j.0); + for (inner, (x, y)) in i.1.iter().zip(j.1.iter()).enumerate() { + assert_eq!(x, y, "at index: outer {} inner {}", outer, inner); + } + } + assert_eq_uvec!(x, expected); + + assert_eq!(StorageVersion::get::(), 4); + }); + } + + #[test] + #[allow(deprecated)] + fn migration_v3_to_v4_too_large_calls_are_ignored() { + new_test_ext().execute_with(|| { + // Assume that we are at V3. + StorageVersion::new(3).put::(); + + let too_large_call = RuntimeCall::System(frame_system::Call::remark { + remark: vec![0; ::Preimages::MAX_LENGTH + 1], + }); + + let i = 0u64; + let k = i.twox_64_concat(); + let old = vec![Some(ScheduledV3Of:: { + maybe_id: None, + priority: 1, + call: too_large_call.clone().into(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + })]; + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); + + // The pre_upgrade hook fails: + let err = v3::MigrateToV4::::pre_upgrade().unwrap_err(); + assert!(err.contains("Call is too large")); + // But the migration itself works: + let _w = v3::MigrateToV4::::on_runtime_upgrade(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + // The call becomes `None`. + let expected = vec![(0, vec![None])]; + assert_eq_uvec!(x, expected); + + assert_eq!(StorageVersion::get::(), 4); + }); + } + + fn signed(i: u64) -> OriginCaller { + system::RawOrigin::Signed(i).into() + } +} diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6aaad13e48183..61efdfb67b73e 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -124,7 +124,7 @@ parameter_types! { } impl system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; @@ -151,10 +151,6 @@ impl system::Config for Test { impl logger::Config for Test { type RuntimeEvent = RuntimeEvent; } -parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; - pub const NoPreimagePostponement: Option = Some(2); -} ord_parameter_types! 
{ pub const One: u64 = 1; } @@ -164,11 +160,54 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = (); type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<1024>; type BaseDeposit = (); type ByteDeposit = (); } +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn service_agendas_base() -> Weight { + Weight::from_ref_time(0b0000_0001) + } + fn service_agenda_base(i: u32) -> Weight { + Weight::from_ref_time((i << 8) as u64 + 0b0000_0010) + } + fn service_task_base() -> Weight { + Weight::from_ref_time(0b0000_0100) + } + fn service_task_periodic() -> Weight { + Weight::from_ref_time(0b0000_1100) + } + fn service_task_named() -> Weight { + Weight::from_ref_time(0b0001_0100) + } + fn service_task_fetched(s: u32) -> Weight { + Weight::from_ref_time((s << 8) as u64 + 0b0010_0100) + } + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(0b0100_0000) + } + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(0b1000_0000) + } + fn schedule(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn cancel(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn schedule_named(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn cancel_named(_s: u32) -> Weight { + Weight::from_ref_time(50) + } +} +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; @@ -177,10 +216,9 @@ impl Config for Test { type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EitherOfDiverse, EnsureSignedBy>; type MaxScheduledPerBlock = ConstU32<10>; - type WeightInfo = (); + type WeightInfo = TestWeightInfo; type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = NoPreimagePostponement; + type Preimages = Preimage; } pub type LoggerCall = logger::Call; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index f6db70ae42d49..033d787946709 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -23,7 +23,7 @@ use crate::mock::{ }; use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{Contains, GetStorageVersion, OnInitialize, PreimageProvider}, + traits::{Contains, GetStorageVersion, OnInitialize, QueryPreimage, StorePreimage}, Hashable, }; use sp_runtime::traits::Hash; @@ -33,9 +33,15 @@ use substrate_test_utils::assert_eq_uvec; fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -49,51 +55,19 @@ fn basic_scheduling_works() { fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash); + let len = call.using_encoded(|x| x.len()) as u32; + let hashed = Preimage::pick(hash, 
len); assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); - assert!(Preimage::preimage_requested(&hash)); + assert!(Preimage::is_requested(&hash)); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert!(!Preimage::have_preimage(&hash)); - assert!(!Preimage::preimage_requested(&hash)); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn scheduling_with_preimage_postpones_correctly() { - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); - let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash); - - assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); - assert!(Preimage::preimage_requested(&hash)); - - run_to_block(4); - // #4 empty due to no preimage - assert!(logger::log().is_empty()); - - // Register preimage. - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); - - run_to_block(5); - // #5 empty since postponement is 2 blocks. - assert!(logger::log().is_empty()); - - run_to_block(6); - // #6 is good. + assert!(!Preimage::len(&hash).is_some()); + assert!(!Preimage::is_requested(&hash)); assert_eq!(logger::log(), vec![(root(), 42u32)]); - assert!(!Preimage::have_preimage(&hash)); - assert!(!Preimage::preimage_requested(&hash)); - run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); @@ -104,10 +78,16 @@ fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 - assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::After(3), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); run_to_block(5); assert!(logger::log().is_empty()); run_to_block(6); @@ -122,9 +102,15 @@ fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::After(0), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); // Will trigger on the next block. 
run_to_block(3); assert_eq!(logger::log(), vec![(root(), 42u32)]); @@ -142,8 +128,11 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - RuntimeCall::Logger(logger::Call::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into() + Preimage::bound(RuntimeCall::Logger(logger::Call::log { + i: 42, + weight: Weight::from_ref_time(10) + })) + .unwrap() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -166,10 +155,17 @@ fn periodic_scheduling_works() { fn reschedule_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into()).unwrap(), + Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap() + ) + .unwrap(), (4, 0) ); @@ -198,16 +194,16 @@ fn reschedule_works() { fn reschedule_named_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - call.into(), + Preimage::bound(call).unwrap(), ) .unwrap(), (4, 0) @@ -216,13 +212,10 @@ fn reschedule_named_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), - (6, 0) - ); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); assert_noop!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), + Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)), Error::::RescheduleNoChange ); @@ -241,16 +234,16 @@ fn reschedule_named_works() { fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), Some((3, 3)), 127, root(), - call.into(), + Preimage::bound(call).unwrap(), ) .unwrap(), (4, 0) @@ -259,14 +252,8 @@ fn reschedule_named_perodic_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), - (5, 0) - ); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), - (6, 0) - ); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(5)).unwrap(), (5, 0)); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); run_to_block(5); assert!(logger::log().is_empty()); @@ -275,7 +262,7 @@ fn reschedule_named_perodic_works() { assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), + Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(10)).unwrap(), (10, 0) ); @@ -298,13 +285,16 @@ fn cancel_named_scheduling_works_with_normal_cancel() { new_test_ext().execute_with(|| { // at #4. 
Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -312,13 +302,16 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); + assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); assert_ok!(Scheduler::do_cancel(None, i)); run_to_block(100); assert!(logger::log().is_empty()); @@ -330,35 +323,44 @@ fn cancel_named_periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), Some((3, 3)), 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); // same id results in error. assert!(Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10) + })) + .unwrap(), ) .is_err()); // different id is ok. Scheduler::do_schedule_named( - 2u32.encode(), + [2u8; 32], DispatchTime::At(8), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); @@ -366,7 +368,7 @@ fn cancel_named_periodic_scheduling_works() { run_to_block(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(6); - assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); + assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); @@ -374,28 +376,23 @@ fn cancel_named_periodic_scheduling_works() { #[test] fn scheduler_respects_weight_limits() { + let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); // 69 and 42 do not fit together run_to_block(4); @@ -405,62 +402,128 @@ fn scheduler_respects_weight_limits() { }); } +/// Permanently overweight calls are not deleted but also not executed. 
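The arithmetic behind `scheduler_respects_weight_limits` above: each call weighs two thirds of `MaximumWeight`, so exactly one fits per block and the other is carried over. The mock's `TestWeightInfo` helps with such assertions by giving each base cost a distinct low bit pattern, so a measured total can be decomposed by inspection. Both points in plain integers:

fn main() {
    let max_weight: u64 = 900; // stand-in for the mock's MaximumWeight
    let call = max_weight / 3 * 2; // 600, as in `max_weight / 3 * 2`
    assert!(call <= max_weight); // one such call fits in a block...
    assert!(call + call > max_weight); // ...but two together do not

    // Mock base costs are distinct bits, so a total decomposes by inspection:
    let service_agendas_base: u64 = 0b0000_0001;
    let service_task_base: u64 = 0b0000_0100;
    let execute_dispatch_unsigned: u64 = 0b1000_0000;
    let total =
        service_agendas_base + service_task_base + execute_dispatch_unsigned;
    assert_eq!(total & service_agendas_base, service_agendas_base);
    assert_eq!(total & execute_dispatch_unsigned, execute_dispatch_unsigned);
}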
 #[test]
-fn scheduler_respects_hard_deadlines_more() {
+fn scheduler_does_not_delete_permanently_overweight_call() {
+	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
 	new_test_ext().execute_with(|| {
+		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight });
 		assert_ok!(Scheduler::do_schedule(
 			DispatchTime::At(4),
 			None,
-			0,
+			127,
 			root(),
-			RuntimeCall::Logger(LoggerCall::log {
-				i: 42,
-				weight: MaximumSchedulerWeight::get() / 2
-			})
-			.into(),
+			Preimage::bound(call).unwrap(),
 		));
+		// Never executes.
+		run_to_block(100);
+		assert_eq!(logger::log(), vec![]);
+
+		// Assert the `PermanentlyOverweight` event.
+		assert_eq!(
+			System::events().last().unwrap().event,
+			crate::Event::PermanentlyOverweight { task: (4, 0), id: None }.into(),
+		);
+		// The call is still in the agenda.
+		assert!(Agenda::<Test>::get(4)[0].is_some());
+	});
+}
+
+#[test]
+fn scheduler_handles_periodic_failure() {
+	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
+	let max_per_block = <Test as Config>::MaxScheduledPerBlock::get();
+
+	new_test_ext().execute_with(|| {
+		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 });
+		let bound = Preimage::bound(call).unwrap();
+
 		assert_ok!(Scheduler::do_schedule(
 			DispatchTime::At(4),
-			None,
-			0,
+			Some((4, u32::MAX)),
+			127,
 			root(),
-			RuntimeCall::Logger(LoggerCall::log {
-				i: 69,
-				weight: MaximumSchedulerWeight::get() / 2
-			})
-			.into(),
+			bound.clone(),
+		));
+		// Executes 5 times till block 20.
+		run_to_block(20);
+		assert_eq!(logger::log().len(), 5);
+
+		// Block 28 will already be full.
+		for _ in 0..max_per_block {
+			assert_ok!(Scheduler::do_schedule(
+				DispatchTime::At(28),
+				None,
+				120,
+				root(),
+				bound.clone(),
+			));
+		}
+
+		// Going to block 24 will emit a `PeriodicFailed` event.
+		run_to_block(24);
+		assert_eq!(logger::log().len(), 6);
+
+		assert_eq!(
+			System::events().last().unwrap().event,
+			crate::Event::PeriodicFailed { task: (24, 0), id: None }.into(),
+		);
+	});
+}
+
+#[test]
+fn scheduler_handles_periodic_unavailable_preimage() {
+	let max_weight: Weight = <Test as Config>::MaximumWeight::get();
+
+	new_test_ext().execute_with(|| {
+		let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 });
+		let hash = <Test as frame_system::Config>::Hashing::hash_of(&call);
+		let len = call.using_encoded(|x| x.len()) as u32;
+		let bound = Preimage::pick(hash, len);
+		assert_ok!(Preimage::note(call.encode().into()));
+
+		assert_ok!(Scheduler::do_schedule(
+			DispatchTime::At(4),
+			Some((4, u32::MAX)),
+			127,
+			root(),
+			bound.clone(),
 		));
-		// With base weights, 69 and 42 should not fit together, but do because of hard
-		// deadlines
+		// Executes once till block 4.
 		run_to_block(4);
-		assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]);
+		assert_eq!(logger::log().len(), 1);
+
+		// Unnote the preimage.
+		Preimage::unnote(&hash);
+
+		// Does not ever execute again.
+		run_to_block(100);
+		assert_eq!(logger::log().len(), 1);
+
+		// The preimage is not requested anymore.
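+		// (On a failed fetch the scheduler is assumed to release its preimage
+		// request, roughly `T::Preimages::drop(&bound)`, so the hash is no
+		// longer tracked:)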
+ assert!(!Preimage::is_requested(&hash)); }); } #[test] fn scheduler_respects_priority_ordering() { + let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 1, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); run_to_block(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); @@ -470,35 +533,30 @@ fn scheduler_respects_priority_ordering() { #[test] fn scheduler_respects_priority_ordering_with_soft_deadlines() { new_test_ext().execute_with(|| { - let max_weight = MaximumSchedulerWeight::get() - <() as WeightInfo>::on_initialize(0); - let item_weight = - <() as WeightInfo>::on_initialize(1) - <() as WeightInfo>::on_initialize(0); + let max_weight: Weight = ::MaximumWeight::get(); + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 5 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 255, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 2 - item_weight }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 5 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 2 - item_weight }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 2600, weight: max_weight / 5 * 4 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 126, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: max_weight / 2 - item_weight + Weight::from_ref_time(1) - }) - .into(), + Preimage::bound(call).unwrap(), )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through @@ -513,90 +571,96 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight = <() as WeightInfo>::on_initialize(0); - let call_weight = MaximumSchedulerWeight::get() / 4; + let call_weight = Weight::from_ref_time(25); // Named + let call = RuntimeCall::Logger(LoggerCall::log { + i: 3, + weight: call_weight + Weight::from_ref_time(1), + }); assert_ok!(Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(3), None, 255, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 3, - weight: call_weight + Weight::from_ref_time(1) - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: call_weight + Weight::from_ref_time(2), + }); // Anon Periodic assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), Some((1000, 3)), 128, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: call_weight + Weight::from_ref_time(2) - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: call_weight + Weight::from_ref_time(3), + }); // Anon assert_ok!(Scheduler::do_schedule( 
DispatchTime::At(2), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: call_weight + Weight::from_ref_time(3) - }) - .into(), + Preimage::bound(call).unwrap(), )); // Named Periodic + let call = RuntimeCall::Logger(LoggerCall::log { + i: 2600, + weight: call_weight + Weight::from_ref_time(4), + }); assert_ok!(Scheduler::do_schedule_named( - 2u32.encode(), + [2u8; 32], DispatchTime::At(1), Some((1000, 3)), 126, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: call_weight + Weight::from_ref_time(4) - }) - .into(), + Preimage::bound(call).unwrap(), )); // Will include the named periodic only - let actual_weight = Scheduler::on_initialize(1); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(4) + - <() as MarginalWeightInfo>::item(true, true, Some(false)) + Scheduler::on_initialize(1), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(4) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic - let actual_weight = Scheduler::on_initialize(2); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(2) + - <() as MarginalWeightInfo>::item(false, false, Some(false)) + + Scheduler::on_initialize(2), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(2) + + ::service_task(None, false, true) + + TestWeightInfo::execute_dispatch_unsigned() + call_weight + Weight::from_ref_time(3) + - <() as MarginalWeightInfo>::item(true, false, Some(false)) + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(2) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only - let actual_weight = Scheduler::on_initialize(3); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(1) + - <() as MarginalWeightInfo>::item(false, true, Some(false)) + Scheduler::on_initialize(3), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(1) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!( logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)] @@ -604,35 +668,33 @@ fn on_initialize_weight_is_correct() { // Will contain none let actual_weight = Scheduler::on_initialize(4); - assert_eq!(actual_weight, base_weight); + assert_eq!( + actual_weight, + TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(0) + ); }); } #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + assert_ok!( + Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 4, None, 127, call,) ); - 
assert_ok!(Scheduler::schedule_named( - RuntimeOrigin::root(), - 1u32.encode(), - 4, - None, - 127, - call, - )); assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2)); run_to_block(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), 1u32.encode())); + assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32])); assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); @@ -645,29 +707,30 @@ fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { run_to_block(3); - let call1 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call3 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); - assert_err!( - Scheduler::schedule_named(RuntimeOrigin::root(), 1u32.encode(), 2, None, 127, call1), + let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + let call3 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + + assert_noop!( + Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 2, None, 127, call1), Error::::TargetBlockNumberInPast, ); - assert_err!( + assert_noop!( Scheduler::schedule(RuntimeOrigin::root(), 2, None, 127, call2), Error::::TargetBlockNumberInPast, ); - assert_err!( + assert_noop!( Scheduler::schedule(RuntimeOrigin::root(), 3, None, 127, call3), Error::::TargetBlockNumberInPast, ); @@ -675,19 +738,19 @@ fn fails_to_schedule_task_in_the_past() { } #[test] -fn should_use_orign() { +fn should_use_origin() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -698,7 +761,7 @@ fn should_use_orign() { // Scheduled calls are in the agenda. 
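+		// (A task address is the pair `(block, index)`; `cancel(4, 1)` below thus
+		// targets the second task scheduled for block 4.)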
assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), 1u32.encode())); + assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), [1u8; 32])); assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); @@ -707,20 +770,20 @@ fn should_use_orign() { } #[test] -fn should_check_orign() { +fn should_check_origin() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_noop!( Scheduler::schedule_named( system::RawOrigin::Signed(2).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -736,25 +799,19 @@ fn should_check_orign() { } #[test] -fn should_check_orign_for_cancel() { +fn should_check_origin_for_cancel() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 69, - weight: Weight::from_ref_time(1000), - }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 42, - weight: Weight::from_ref_time(1000), - }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -766,14 +823,11 @@ fn should_check_orign_for_cancel() { assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), + Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), [1u8; 32]), BadOrigin ); assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); - assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), - BadOrigin - ); + assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), BadOrigin); assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); run_to_block(5); assert_eq!( @@ -787,7 +841,7 @@ fn should_check_orign_for_cancel() { } #[test] -fn migration_to_v3_works() { +fn migration_to_v4_works() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); @@ -807,7 +861,7 @@ fn migration_to_v3_works() { priority: 123, call: RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000), + weight: Weight::from_ref_time(10), }), maybe_periodic: Some((456u64, 10)), }), @@ -815,103 +869,109 @@ fn migration_to_v3_works() { frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } - Scheduler::migrate_v1_to_v3(); - - assert_eq_uvec!( - Agenda::::iter().collect::>(), - vec![ - ( - 0, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 10, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - 
.into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 1, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 11, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - .into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 2, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 12, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - .into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ) - ] - ); + Scheduler::migrate_v1_to_v4(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + let expected = vec![ + ( + 0, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 10, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ], + ), + ( + 1, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 11, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ], + ), + ( + 2, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 12, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + 
}), + ], + ), + ]; + for (i, j) in x.iter().zip(expected.iter()) { + assert_eq!(i.0, j.0); + for (x, y) in i.1.iter().zip(j.1.iter()) { + assert_eq!(x, y); + } + } + assert_eq_uvec!(x, expected); assert_eq!(Scheduler::current_storage_version(), 3); }); @@ -922,29 +982,29 @@ fn test_migrate_origin() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); - let old: Vec, u64, u32, u64>>> = vec![ + let old: Vec, u64, u32, u64>>> = vec![ Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100), - }) - .into(), + })) + .unwrap(), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), }), None, Some(Scheduled { - maybe_id: Some(b"test".to_vec()), + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, origin: 2u32, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000), - }) - .into(), + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), @@ -965,32 +1025,32 @@ fn test_migrate_origin() { Scheduler::migrate_origin::(); assert_eq_uvec!( - Agenda::::iter().collect::>(), + Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(), vec![ ( 0, vec![ - Some(ScheduledV2::, u64, OriginCaller, u64> { + Some(ScheduledOf:: { maybe_id: None, priority: 10, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1000,27 +1060,27 @@ fn test_migrate_origin() { ( 1, vec![ - Some(ScheduledV2 { + Some(Scheduled { maybe_id: None, priority: 11, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1030,27 +1090,27 @@ fn test_migrate_origin() { ( 2, vec![ - Some(ScheduledV2 { + Some(Scheduled { maybe_id: None, priority: 12, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: 
system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1061,3 +1121,649 @@ fn test_migrate_origin() { ); }); } + +#[test] +fn postponed_named_task_cannot_be_rescheduled() { + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let hash = ::Hashing::hash_of(&call); + let len = call.using_encoded(|x| x.len()) as u32; + let hashed = Preimage::pick(hash, len); + let name: [u8; 32] = hash.as_ref().try_into().unwrap(); + + let address = Scheduler::do_schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + hashed.clone(), + ) + .unwrap(); + assert!(Preimage::is_requested(&hash)); + assert!(Lookup::::contains_key(name)); + + // Run to a very large block. + run_to_block(10); + // It was not executed. + assert!(logger::log().is_empty()); + assert!(Preimage::is_requested(&hash)); + // Postponing removes the lookup. + assert!(!Lookup::::contains_key(name)); + + // The agenda still contains the call. + let agenda = Agenda::::iter().collect::>(); + assert_eq!(agenda.len(), 1); + assert_eq!( + agenda[0].1, + vec![Some(Scheduled { + maybe_id: Some(name), + priority: 127, + call: hashed, + maybe_periodic: None, + origin: root().into(), + _phantom: Default::default(), + })] + ); + + // Finally add the preimage. + assert_ok!(Preimage::note(call.encode().into())); + run_to_block(1000); + // It did not execute. + assert!(logger::log().is_empty()); + assert!(Preimage::is_requested(&hash)); + + // Manually re-schedule the call by name does not work. + assert_err!( + Scheduler::do_reschedule_named(name, DispatchTime::At(1001)), + Error::::NotFound + ); + // Manually re-scheduling the call by address errors. + assert_err!( + Scheduler::do_reschedule(address, DispatchTime::At(1001)), + Error::::Named + ); + }); +} + +/// Using the scheduler as `v3::Anon` works. +#[test] +fn scheduler_v3_anon_basic_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule a call. + let _address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + // Executes in block 4. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // ... but not again. + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn scheduler_v3_anon_cancel_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + // Cancel the call. + assert_ok!(>::cancel(address)); + // It did not get executed. 
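+		// (Cancelling clears the agenda slot, so any further operation on the
+		// same address fails with `DispatchError::Unavailable`, as asserted below.)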
+ run_to_block(100); + assert!(logger::log().is_empty()); + // Cannot cancel again. + assert_err!(>::cancel(address), DispatchError::Unavailable); + }); +} + +#[test] +fn scheduler_v3_anon_reschedule_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Cannot re-schedule into the same block. + assert_noop!( + >::reschedule(address, DispatchTime::At(4)), + Error::::RescheduleNoChange + ); + // Cannot re-schedule into the past. + assert_noop!( + >::reschedule(address, DispatchTime::At(3)), + Error::::TargetBlockNumberInPast + ); + // Re-schedule to block 5. + assert_ok!(>::reschedule(address, DispatchTime::At(5))); + // Scheduled for block 5. + run_to_block(4); + assert!(logger::log().is_empty()); + run_to_block(5); + // Does execute in block 5. + assert_eq!(logger::log(), vec![(root(), 42)]); + // Cannot re-schedule executed task. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + DispatchError::Unavailable + ); + }); +} + +/// Cancelling a call and then scheduling a second call for the same +/// block results in different addresses. +#[test] +fn scheduler_v3_anon_schedule_does_not_resuse_addr() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule both calls. + let addr_1 = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call.clone()).unwrap(), + ) + .unwrap(); + // Cancel the call. + assert_ok!(>::cancel(addr_1)); + let addr_2 = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + // Should not re-use the address. + assert!(addr_1 != addr_2); + }); +} + +#[test] +fn scheduler_v3_anon_next_schedule_time_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(address), Ok(4)); + // Block 4 executes it. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42)]); + + // It has no dispatch time anymore. + assert_noop!( + >::next_dispatch_time(address), + DispatchError::Unavailable + ); + }); +} + +/// Re-scheduling a task changes its next dispatch time. +#[test] +fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let old_address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. 
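+		// (Re-scheduling below returns a fresh address; the old one is no
+		// longer valid afterwards.)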
+ assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(old_address), Ok(4)); + // Re-schedule to block 5. + let address = + >::reschedule(old_address, DispatchTime::At(5)).unwrap(); + assert!(address != old_address); + // Scheduled for block 5. + assert_eq!(>::next_dispatch_time(address), Ok(5)); + + // Block 4 does nothing. + run_to_block(4); + assert!(logger::log().is_empty()); + // Block 5 executes it. + run_to_block(5); + assert_eq!(logger::log(), vec![(root(), 42)]); + }); +} + +#[test] +fn scheduler_v3_anon_schedule_agenda_overflows() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + } + + // One more time and it errors. + assert_noop!( + >::schedule(DispatchTime::At(4), None, 127, root(), bound,), + DispatchError::Exhausted + ); + + run_to_block(4); + // All scheduled calls are executed. + assert_eq!(logger::log().len() as u32, max); + }); +} + +/// Cancelling and scheduling does not overflow the agenda but fills holes. +#[test] +fn scheduler_v3_anon_cancel_and_schedule_fills_holes() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + assert!(max > 3, "This test only makes sense for MaxScheduledPerBlock > 3"); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let mut addrs = Vec::<_>::default(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + addrs.push( + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(), + ); + } + // Cancel three of them. + for addr in addrs.into_iter().take(3) { + >::cancel(addr).unwrap(); + } + // Schedule three new ones. + for i in 0..3 { + let (_block, index) = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + assert_eq!(i, index); + } + + run_to_block(4); + // Maximum number of calls are executed. + assert_eq!(logger::log().len() as u32, max); + }); +} + +/// Re-scheduling does not overflow the agenda but fills holes. +#[test] +fn scheduler_v3_anon_reschedule_fills_holes() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + assert!(max > 3, "pre-condition: This test only makes sense for MaxScheduledPerBlock > 3"); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let mut addrs = Vec::<_>::default(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + addrs.push( + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(), + ); + } + let mut new_addrs = Vec::<_>::default(); + // Reversed last three elements of block 4. + let last_three = addrs.into_iter().rev().take(3).collect::>(); + // Re-schedule three of them to block 5. 
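+		// (This leaves three `None` holes in block 4's agenda; re-scheduling back
+		// into block 4 is expected to fill exactly those holes, yielding the
+		// original addresses again.)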
+		for addr in last_three.iter().cloned() {
+			new_addrs.push(
+				<Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::reschedule(
+					addr,
+					DispatchTime::At(5),
+				)
+				.unwrap(),
+			);
+		}
+		// Re-scheduling them back into block 4 should result in the same addrs.
+		for (old, want) in new_addrs.into_iter().zip(last_three.into_iter().rev()) {
+			let new = <Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::reschedule(
+				old,
+				DispatchTime::At(4),
+			)
+			.unwrap();
+			assert_eq!(new, want);
+		}
+
+		run_to_block(4);
+		// Maximum number of calls are executed.
+		assert_eq!(logger::log().len() as u32, max);
+	});
+}
+
+/// Re-scheduling into the same block produces a different address
+/// if there is still space in the agenda.
+#[test]
+fn scheduler_v3_anon_reschedule_does_not_reuse_addr_if_agenda_not_full() {
+	use frame_support::traits::schedule::v3::Anon;
+	let max: u32 = <Test as Config>::MaxScheduledPerBlock::get();
+	assert!(max > 1, "This test only makes sense for MaxScheduledPerBlock > 1");
+
+	new_test_ext().execute_with(|| {
+		let call =
+			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) });
+
+		// Schedule both calls.
+		let addr_1 = <Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::schedule(
+			DispatchTime::At(4),
+			None,
+			127,
+			root(),
+			Preimage::bound(call.clone()).unwrap(),
+		)
+		.unwrap();
+		// Cancel the call.
+		assert_ok!(<Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::cancel(addr_1));
+		let addr_2 = <Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::schedule(
+			DispatchTime::At(5),
+			None,
+			127,
+			root(),
+			Preimage::bound(call).unwrap(),
+		)
+		.unwrap();
+		// Re-schedule `call` to block 4.
+		let addr_3 =
+			<Scheduler as Anon<u64, RuntimeCall, OriginCaller>>::reschedule(addr_2, DispatchTime::At(4))
+				.unwrap();
+
+		// Should not re-use the address.
+		assert!(addr_1 != addr_3);
+	});
+}
+
+/// The scheduler can be used as the `v3::Named` trait.
+#[test]
+fn scheduler_v3_named_basic_works() {
+	use frame_support::traits::schedule::v3::Named;
+
+	new_test_ext().execute_with(|| {
+		let call =
+			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) });
+		let name = [1u8; 32];
+
+		// Schedule a call.
+		let _address = <Scheduler as Named<u64, RuntimeCall, OriginCaller>>::schedule_named(
+			name,
+			DispatchTime::At(4),
+			None,
+			127,
+			root(),
+			Preimage::bound(call).unwrap(),
+		)
+		.unwrap();
+
+		run_to_block(3);
+		// Did not execute till block 3.
+		assert!(logger::log().is_empty());
+		// Executes in block 4.
+		run_to_block(4);
+		assert_eq!(logger::log(), vec![(root(), 42u32)]);
+		// ... but not again.
+		run_to_block(100);
+		assert_eq!(logger::log(), vec![(root(), 42u32)]);
+	});
+}
+
+/// A named task can be cancelled by its name.
+#[test]
+fn scheduler_v3_named_cancel_named_works() {
+	use frame_support::traits::schedule::v3::Named;
+	new_test_ext().execute_with(|| {
+		let call =
+			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) });
+		let bound = Preimage::bound(call).unwrap();
+		let name = [1u8; 32];
+
+		// Schedule a call.
+		<Scheduler as Named<u64, RuntimeCall, OriginCaller>>::schedule_named(
+			name,
+			DispatchTime::At(4),
+			None,
+			127,
+			root(),
+			bound.clone(),
+		)
+		.unwrap();
+		// Cancel the call by name.
+		assert_ok!(<Scheduler as Named<u64, RuntimeCall, OriginCaller>>::cancel_named(name));
+		// It did not get executed.
+		run_to_block(100);
+		assert!(logger::log().is_empty());
+		// Cannot cancel again.
+		assert_noop!(
+			<Scheduler as Named<u64, RuntimeCall, OriginCaller>>::cancel_named(name),
+			DispatchError::Unavailable
+		);
+	});
+}
+
+/// A named task can also be cancelled by its address.
+#[test]
+fn scheduler_v3_named_cancel_without_name_works() {
+	use frame_support::traits::schedule::v3::{Anon, Named};
+	new_test_ext().execute_with(|| {
+		let call =
+			RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) });
+		let bound = Preimage::bound(call).unwrap();
+		let name = [1u8; 32];
+
+		// Schedule a call.
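+		// (The returned address is a plain `(block, index)` pair, so the `Anon`
+		// trait can cancel it even though it was scheduled via `Named`.)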
+ let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + // Cancel the call by address. + assert_ok!(>::cancel(address)); + // It did not get executed. + run_to_block(100); + assert!(logger::log().is_empty()); + // Cannot cancel again. + assert_err!(>::cancel(address), DispatchError::Unavailable); + }); +} + +/// A named task can be re-scheduled by its name but not by its address. +#[test] +fn scheduler_v3_named_reschedule_named_works() { + use frame_support::traits::schedule::v3::{Anon, Named}; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let name = [1u8; 32]; + + // Schedule a call. + let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Cannot re-schedule by address. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + Error::::Named, + ); + // Cannot re-schedule into the same block. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(4)), + Error::::RescheduleNoChange + ); + // Cannot re-schedule into the past. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(3)), + Error::::TargetBlockNumberInPast + ); + // Re-schedule to block 5. + assert_ok!(>::reschedule_named(name, DispatchTime::At(5))); + // Scheduled for block 5. + run_to_block(4); + assert!(logger::log().is_empty()); + run_to_block(5); + // Does execute in block 5. + assert_eq!(logger::log(), vec![(root(), 42)]); + // Cannot re-schedule executed task. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(10)), + DispatchError::Unavailable + ); + // Also not by address. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + DispatchError::Unavailable + ); + }); +} + +#[test] +fn scheduler_v3_named_next_schedule_time_works() { + use frame_support::traits::schedule::v3::{Anon, Named}; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let name = [1u8; 32]; + + // Schedule a call. + let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(name), Ok(4)); + // Also works by address. + assert_eq!(>::next_dispatch_time(address), Ok(4)); + // Block 4 executes it. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42)]); + + // It has no dispatch time anymore. + assert_noop!( + >::next_dispatch_time(name), + DispatchError::Unavailable + ); + // Also not by address. + assert_noop!( + >::next_dispatch_time(address), + DispatchError::Unavailable + ); + }); +} diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index afbcf9373b2de..cb72fe3e2fdda 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_scheduler // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_scheduler +// --chain=dev // --output=./frame/scheduler/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -44,16 +46,14 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_scheduler. pub trait WeightInfo { - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight; - fn on_initialize_named_resolved(s: u32, ) -> Weight; - fn on_initialize_periodic_resolved(s: u32, ) -> Weight; - fn on_initialize_resolved(s: u32, ) -> Weight; - fn on_initialize_named_aborted(s: u32, ) -> Weight; - fn on_initialize_aborted(s: u32, ) -> Weight; - fn on_initialize_periodic_named(s: u32, ) -> Weight; - fn on_initialize_periodic(s: u32, ) -> Weight; - fn on_initialize_named(s: u32, ) -> Weight; - fn on_initialize(s: u32, ) -> Weight; + fn service_agendas_base() -> Weight; + fn service_agenda_base(s: u32, ) -> Weight; + fn service_task_base() -> Weight; + fn service_task_fetched(s: u32, ) -> Weight; + fn service_task_named() -> Weight; + fn service_task_periodic() -> Weight; + fn execute_dispatch_signed() -> Weight; + fn execute_dispatch_unsigned() -> Weight; fn schedule(s: u32, ) -> Weight; fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; @@ -63,149 +63,84 @@ pub trait WeightInfo { /// Weights for pallet_scheduler using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + // Storage: Scheduler IncompleteSince (r:1 w:1) + fn service_agendas_base() -> Weight { + Weight::from_ref_time(4_992_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[0, 512]`. 
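+	/// (These benchmarked formulas are affine in `s`: a fixed base plus a
+	/// per-item slope, here roughly 4.3 µs + s * 0.34 µs of ref-time, assuming
+	/// the conventional 1 ref-time unit = 1 ps mapping.)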
+ fn service_agenda_base(s: u32, ) -> Weight { + Weight::from_ref_time(4_320_000 as u64) + // Standard Error: 619 + .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) + fn service_task_base() -> Weight { + Weight::from_ref_time(10_864_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[128, 4194304]`. 
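+	/// (Here `s` is the encoded call length in bytes; at 1_138 ref-time units
+	/// per byte this is roughly 1.1 ns per byte of preimage fetched, on top of
+	/// an approximately 24.6 µs base.)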
+ fn service_task_fetched(s: u32, ) -> Weight { + Weight::from_ref_time(24_586_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn service_task_named() -> Weight { + Weight::from_ref_time(13_127_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn service_task_periodic() -> Weight { + Weight::from_ref_time(11_053_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(4_158_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(4_104_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(20_074_000 as u64) + // Standard Error: 765 + .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) + /// The range of component `s` is `[1, 512]`. 
fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(21_509_000 as u64) + // Standard Error: 708 + .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_427_000 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_875_000 as u64) + // Standard Error: 693 + .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -213,149 +148,84 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + // Storage: Scheduler IncompleteSince (r:1 w:1) + fn service_agendas_base() -> Weight { + Weight::from_ref_time(4_992_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[0, 512]`. 
+ fn service_agenda_base(s: u32, ) -> Weight { + Weight::from_ref_time(4_320_000 as u64) + // Standard Error: 619 + .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) + fn service_task_base() -> Weight { + Weight::from_ref_time(10_864_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[128, 4194304]`. 
+ fn service_task_fetched(s: u32, ) -> Weight { + Weight::from_ref_time(24_586_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn service_task_named() -> Weight { + Weight::from_ref_time(13_127_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn service_task_periodic() -> Weight { + Weight::from_ref_time(11_053_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(4_158_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(4_104_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(20_074_000 as u64) + // Standard Error: 765 + .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) + /// The range of component `s` is `[1, 512]`. 
fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(21_509_000 as u64) + // Standard Error: 708 + .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_427_000 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_875_000 as u64) + // Standard Error: 693 + .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index acbcb65a3e986..1551d85ea4c96 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -169,6 +169,10 @@ pub fn expand_outer_origin( &self.caller } + fn into_caller(self) -> Self::PalletsOrigin { + self.caller + } + fn try_with_caller( mut self, f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -190,13 +194,6 @@ pub fn expand_outer_origin( fn signed(by: Self::AccountId) -> Self { #system_path::RawOrigin::Signed(by).into() } - - fn as_signed(self) -> Option { - match self.caller { - OriginCaller::system(#system_path::RawOrigin::Signed(by)) => Some(by), - _ => None, - } - } } #[derive( @@ -215,7 +212,6 @@ pub fn expand_outer_origin( // For backwards compatibility and ease of accessing these functions. 
#[allow(dead_code)] impl RuntimeOrigin { - #[doc = #doc_string_none_origin] pub fn none() -> Self { ::none() @@ -238,6 +234,21 @@ pub fn expand_outer_origin( } } + impl #scrate::traits::CallerTrait<<#runtime as #system_path::Config>::AccountId> for OriginCaller { + fn into_system(self) -> Option<#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { + match self { + OriginCaller::system(x) => Some(x), + _ => None, + } + } + fn as_system_ref(&self) -> Option<&#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { + match &self { + OriginCaller::system(o) => Some(o), + _ => None, + } + } + } + impl TryFrom for #system_path::Origin<#runtime> { type Error = OriginCaller; fn try_from(x: OriginCaller) diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index db2bc90658ee2..d497a672e2970 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -3181,8 +3181,8 @@ mod tests { dispatch::{DispatchClass, DispatchInfo, Pays}, metadata::*, traits::{ - CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, - OnRuntimeUpgrade, PalletInfo, + CallerTrait, CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, + OnInitialize, OnRuntimeUpgrade, PalletInfo, }, }; use sp_weights::RuntimeDbWeight; @@ -3300,6 +3300,16 @@ mod tests { } } + impl CallerTrait<::AccountId> for OuterOrigin { + fn into_system(self) -> Option::AccountId>> { + unimplemented!("Not required in tests!") + } + + fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { + unimplemented!("Not required in tests!") + } + } + impl crate::traits::OriginTrait for OuterOrigin { type Call = ::RuntimeCall; type PalletsOrigin = OuterOrigin; @@ -3325,6 +3335,10 @@ mod tests { unimplemented!("Not required in tests!") } + fn into_caller(self) -> Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -3344,6 +3358,9 @@ mod tests { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } + fn as_system_ref(&self) -> Option<&RawOrigin> { + unimplemented!("Not required in tests!") + } } impl system::Config for TraitImpl { diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index d51c32649a797..302d3354dae5e 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -58,10 +58,11 @@ pub use misc::{ Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveSaturating, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PreimageProvider, - PreimageRecipient, PrivilegeCmp, SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, - WrapperKeepOpaque, WrapperOpaque, + IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, + SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, }; +#[allow(deprecated)] +pub use misc::{PreimageProvider, PreimageRecipient}; #[doc(hidden)] pub use misc::{DEFENSIVE_OP_INTERNAL_ERROR, DEFENSIVE_OP_PUBLIC_ERROR}; @@ -96,8 +97,9 @@ mod dispatch; #[allow(deprecated)] pub use dispatch::EnsureOneOf; pub use dispatch::{ - AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, - MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, 
UnfilteredDispatchable,
+	AsEnsureOriginWithArg, CallerTrait, EitherOf, EitherOfDiverse, EnsureOrigin,
+	EnsureOriginWithArg, MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess,
+	UnfilteredDispatchable,
 };
 
 mod voting;
@@ -106,6 +108,9 @@ pub use voting::{
 	U128CurrencyToVote, VoteTally,
 };
 
+mod preimages;
+pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage};
+
 #[cfg(feature = "try-runtime")]
 mod try_runtime;
 #[cfg(feature = "try-runtime")]
diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs
index c0e7e32a5529e..b96cfae4500e2 100644
--- a/frame/support/src/traits/dispatch.rs
+++ b/frame/support/src/traits/dispatch.rs
@@ -236,17 +236,25 @@ pub trait UnfilteredDispatchable {
 	fn dispatch_bypass_filter(self, origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo;
 }
 
+/// The trait implemented by the overarching enumeration of the different pallets' origins.
+/// Unlike `OriginTrait` impls, this does not include any kind of dispatch/call filter. Also, this
+/// trait is more flexible in terms of how it can be used: it is a `Parameter` and `Member`, so it
+/// can be used as a dispatchable parameter as well as in storage items.
+pub trait CallerTrait<AccountId>: Parameter + Member + From<RawOrigin<AccountId>> {
+	/// Extract the signer from the message if it is a `Signed` origin.
+	fn into_system(self) -> Option<RawOrigin<AccountId>>;
+
+	/// Extract a reference to the system-level `RawOrigin` if it is that.
+	fn as_system_ref(&self) -> Option<&RawOrigin<AccountId>>;
+}
+
 /// Methods available on `frame_system::Config::RuntimeOrigin`.
 pub trait OriginTrait: Sized {
 	/// Runtime call type, as in `frame_system::Config::Call`
 	type Call;
 
 	/// The caller origin, overarching type of all pallets origins.
-	type PalletsOrigin: Parameter
-		+ Member
-		+ Into<Self>
-		+ From<RawOrigin<Self::AccountId>>
-		+ MaxEncodedLen;
+	type PalletsOrigin: Into<Self> + CallerTrait<Self::AccountId> + MaxEncodedLen;
 
 	/// The AccountId used across the system.
 	type AccountId;
@@ -266,9 +274,12 @@ pub trait OriginTrait: Sized {
 	/// For root origin caller, the filters are bypassed and true is returned.
 	fn filter_call(&self, call: &Self::Call) -> bool;
 
-	/// Get the caller.
+	/// Get a reference to the caller (`CallerTrait` impl).
 	fn caller(&self) -> &Self::PalletsOrigin;
 
+	/// Consume `self` and return the caller.
+	fn into_caller(self) -> Self::PalletsOrigin;
+
 	/// Do something with the caller, consuming self but returning it if the caller was unused.
 	fn try_with_caller<R>(
 		self,
@@ -285,7 +296,20 @@ pub trait OriginTrait: Sized {
 	fn signed(by: Self::AccountId) -> Self;
 
 	/// Extract the signer from the message if it is a `Signed` origin.
-	fn as_signed(self) -> Option<Self::AccountId>;
+	fn as_signed(self) -> Option<Self::AccountId> {
+		self.into_caller().into_system().and_then(|s| {
+			if let RawOrigin::Signed(who) = s {
+				Some(who)
+			} else {
+				None
+			}
+		})
+	}
+
+	/// Extract a reference to the system origin, if that's what the caller is.
+	fn as_system_ref(&self) -> Option<&RawOrigin<Self::AccountId>> {
+		self.caller().as_system_ref()
+	}
 }
 
 #[cfg(test)]
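The net effect of the hunk above: `as_signed` and `as_system_ref` gain default bodies on `OriginTrait`, so a runtime origin only has to supply `caller`/`into_caller` plus a `CallerTrait` impl on its caller enum. A rough, self-contained model of that flow; the `Caller`/`Origin` types and the `u64` account id are illustrative stand-ins for the generated code, not part of this patch:

```rust
// Toy model of the new split between the runtime origin and its caller enum.
#[derive(Clone, PartialEq, Eq, Debug)]
enum RawOrigin<AccountId> {
	Root,
	Signed(AccountId),
	None,
}

// Stand-in for the generated `OriginCaller` (one variant per pallet origin).
#[derive(Clone, PartialEq, Eq, Debug)]
enum Caller {
	System(RawOrigin<u64>),
}

impl Caller {
	// Mirrors `CallerTrait::into_system`.
	fn into_system(self) -> Option<RawOrigin<u64>> {
		match self {
			Caller::System(x) => Some(x),
		}
	}
}

struct Origin {
	caller: Caller,
}

impl Origin {
	// Mirrors the new `OriginTrait::into_caller` primitive.
	fn into_caller(self) -> Caller {
		self.caller
	}

	// Mirrors the default `OriginTrait::as_signed`: no per-runtime code needed.
	fn as_signed(self) -> Option<u64> {
		match self.into_caller().into_system() {
			Some(RawOrigin::Signed(who)) => Some(who),
			_ => None,
		}
	}
}

fn main() {
	let o = Origin { caller: Caller::System(RawOrigin::Signed(42)) };
	assert_eq!(o.as_signed(), Some(42));
}
```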
diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs
index 7fc4a6fb08a5a..5a976478fa7c4 100644
--- a/frame/support/src/traits/misc.rs
+++ b/frame/support/src/traits/misc.rs
@@ -932,7 +932,7 @@ pub trait PreimageRecipient<Hash>: PreimageProvider<Hash> {
 	/// Maximum size of a preimage.
 	type MaxSize: Get<u32>;
 
-	/// Store the bytes of a preimage on chain.
+	/// Store the bytes of a preimage on chain; infallible due to the bounded input type.
 	fn note_preimage(bytes: crate::BoundedVec<u8, Self::MaxSize>);
 
 	/// Clear a previously noted preimage. This is infallible and should be treated more like a
diff --git a/frame/support/src/traits/preimages.rs b/frame/support/src/traits/preimages.rs
new file mode 100644
index 0000000000000..594532ba96903
--- /dev/null
+++ b/frame/support/src/traits/preimages.rs
@@ -0,0 +1,317 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Stuff for dealing with 32-byte hashed preimages.
+
+use codec::{Decode, Encode, EncodeLike, MaxEncodedLen};
+use sp_core::{RuntimeDebug, H256};
+use sp_io::hashing::blake2_256;
+use sp_runtime::{traits::ConstU32, DispatchError};
+use sp_std::borrow::Cow;
+
+pub type Hash = H256;
+pub type BoundedInline = crate::BoundedVec<u8, ConstU32<128>>;
+
+#[derive(
+	Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, scale_info::TypeInfo, RuntimeDebug,
+)]
+#[codec(mel_bound())]
+pub enum Bounded<T> {
+	/// A Blake2 256 hash with no preimage length. We
+	/// do not support creation of this except for transitioning from legacy state.
+	/// In the future we will make this a pure `Dummy` item storing only the final `dummy` field.
+	Legacy { hash: Hash, dummy: sp_std::marker::PhantomData<T> },
+	/// A bounded `Call`. Its encoding must be at most 128 bytes.
+	Inline(BoundedInline),
+	/// A Blake2-256 hash of the call together with an upper limit for its size.
+	Lookup { hash: Hash, len: u32 },
+}
+
+impl<T> Bounded<T> {
+	/// Casts the wrapped type into something that encodes alike.
+	///
+	/// # Examples
+	/// ```
+	/// use frame_support::traits::Bounded;
+	///
+	/// // Transmute from `String` to `&str`.
+	/// let x: Bounded<String> = Bounded::Inline(Default::default());
+	/// let _: Bounded<&str> = x.transmute();
+	/// ```
+	pub fn transmute<S: Encode>(self) -> Bounded<S>
+	where
+		T: Encode + EncodeLike<S>,
+	{
+		use Bounded::*;
+		match self {
+			Legacy { hash, .. } => Legacy { hash, dummy: sp_std::marker::PhantomData },
+			Inline(x) => Inline(x),
+			Lookup { hash, len } => Lookup { hash, len },
+		}
+	}
+
+	/// Returns the hash of the preimage.
+	///
+	/// The hash is re-calculated every time if the preimage is inlined.
+	pub fn hash(&self) -> H256 {
+		use Bounded::*;
+		match self {
+			Legacy { hash, .. } => *hash,
+			Inline(x) => blake2_256(x.as_ref()).into(),
+			Lookup { hash, .. } => *hash,
+		}
+	}
+}
+
+// The maximum we expect a single legacy hash lookup to be.
+const MAX_LEGACY_LEN: u32 = 1_000_000;
+
+impl<T> Bounded<T> {
+	/// Returns the length of the preimage or `None` if the length is unknown.
+	pub fn len(&self) -> Option<u32> {
+		match self {
+			Self::Legacy { .. } => None,
+			Self::Inline(i) => Some(i.len() as u32),
+			Self::Lookup { len, .. } => Some(*len),
+		}
+	}
+
+	/// Returns whether the image will require a lookup to be peeked.
+	pub fn lookup_needed(&self) -> bool {
+		match self {
+			Self::Inline(..) => false,
+			Self::Legacy { .. } | Self::Lookup { .. } => true,
+		}
+	}
+
+	/// The maximum length of the lookup that is needed to peek `Self`.
+	pub fn lookup_len(&self) -> Option<u32> {
+		match self {
+			Self::Inline(..) => None,
+			Self::Legacy { .. } => Some(MAX_LEGACY_LEN),
+			Self::Lookup { len, .. } => Some(*len),
+		}
+	}
+
+	/// Constructs a `Lookup` bounded item.
+	pub fn unrequested(hash: Hash, len: u32) -> Self {
+		Self::Lookup { hash, len }
+	}
+
+	/// Constructs a `Legacy` bounded item.
+	#[deprecated = "This API is only for transitioning to Scheduler v3 API"]
+	pub fn from_legacy_hash(hash: impl Into<Hash>) -> Self {
+		Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData }
+	}
+}
+
+pub type FetchResult = Result<Cow<'static, [u8]>, DispatchError>;
+
+/// An interface for looking up preimages from their hash on chain.
+pub trait QueryPreimage {
+	/// Returns whether a preimage exists for a given hash and if so its length.
+	fn len(hash: &Hash) -> Option<u32>;
+
+	/// Returns the preimage for a given hash. If given, `len` must be the size of the preimage.
+	fn fetch(hash: &Hash, len: Option<u32>) -> FetchResult;
+
+	/// Returns whether a preimage request exists for a given hash.
+	fn is_requested(hash: &Hash) -> bool;
+
+	/// Request that someone report a preimage. Providers use this to optimise the economics for
+	/// preimage reporting.
+	fn request(hash: &Hash);
+
+	/// Cancel a previous preimage request.
+	fn unrequest(hash: &Hash);
+
+	/// Request that the data required for decoding the given `bounded` value is made available.
+	fn hold<T>(bounded: &Bounded<T>) {
+		use Bounded::*;
+		match bounded {
+			Inline(..) => {},
+			Legacy { hash, .. } | Lookup { hash, .. } => Self::request(hash),
+		}
+	}
+
+	/// No longer request that the data required for decoding the given `bounded` value is made
+	/// available.
+	fn drop<T>(bounded: &Bounded<T>) {
+		use Bounded::*;
+		match bounded {
+			Inline(..) => {},
+			Legacy { hash, .. } | Lookup { hash, .. } => Self::unrequest(hash),
+		}
+	}
+
+	/// Check to see if all data required for the given `bounded` value is available for its
+	/// decoding.
+	fn have<T>(bounded: &Bounded<T>) -> bool {
+		use Bounded::*;
+		match bounded {
+			Inline(..) => true,
+			Legacy { hash, .. } | Lookup { hash, .. } => Self::len(hash).is_some(),
+		}
+	}
+
+	/// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. This may not
+	/// be `peek`-able or `realize`-able.
+	fn pick<T>(hash: Hash, len: u32) -> Bounded<T> {
+		Self::request(&hash);
+		Bounded::Lookup { hash, len }
+	}
+
+	/// Convert the given `bounded` instance back into its original instance, also returning the
+	/// exact size of its encoded form (if it needed to be looked up from a stored preimage).
+	///
+	/// NOTE: This does not remove any data needed for realization. If you will no longer use the
+	/// `bounded`, call `realize` instead or call `drop` afterwards.
+	fn peek<T: Decode>(bounded: &Bounded<T>) -> Result<(T, Option<u32>), DispatchError> {
+		use Bounded::*;
+		match bounded {
+			Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)),
+			Lookup { hash, len } => {
+				let data = Self::fetch(hash, Some(*len))?;
+				T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32)))
+			},
+			Legacy { hash, .. } => {
+				let data = Self::fetch(hash, None)?;
+				T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32)))
+			},
+		}
+		.ok_or(DispatchError::Corruption)
+	}
+
+	/// Convert the given `bounded` value back into its original instance. If successful,
+	/// `drop` any data backing it. This will not break the realisability of independently
+	/// created instances of `Bounded` which happen to have identical data.
+	fn realize<T: Decode>(bounded: &Bounded<T>) -> Result<(T, Option<u32>), DispatchError> {
+		let r = Self::peek(bounded)?;
+		Self::drop(bounded);
+		Ok(r)
+	}
+}
+
+/// An interface for managing preimages to hashes on chain.
+///
+/// Note that this API does not assume any underlying user is calling, and thus
+/// does not handle any preimage ownership or fees. Other system-level logic that
+/// uses this API should implement that on its own side.
+pub trait StorePreimage: QueryPreimage {
+	/// The maximum length of preimage we can store.
+	///
+	/// This is the maximum length of the *encoded* value that can be passed to `bound`.
+	const MAX_LENGTH: usize;
+
+	/// Request and attempt to store the bytes of a preimage on chain.
+	///
+	/// May return `DispatchError::Exhausted` if the preimage is just too big.
+	fn note(bytes: Cow<[u8]>) -> Result<Hash, DispatchError>;
+
+	/// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is
+	/// provided for symmetry.
+	fn unnote(hash: &Hash) {
+		Self::unrequest(hash)
+	}
+
+	/// Convert an otherwise unbounded or large value into a type ready for placing in storage. The
+	/// result is a type whose `MaxEncodedLen` is 131 bytes.
+	///
+	/// NOTE: Once this API is used, you should use either `drop` or `realize`.
+	fn bound<T: Encode>(t: T) -> Result<Bounded<T>, DispatchError> {
+		let data = t.encode();
+		let len = data.len() as u32;
+		Ok(match BoundedInline::try_from(data) {
+			Ok(bounded) => Bounded::Inline(bounded),
+			Err(unbounded) => Bounded::Lookup { hash: Self::note(unbounded.into())?, len },
+		})
+	}
+}
+
+impl QueryPreimage for () {
+	fn len(_: &Hash) -> Option<u32> {
+		None
+	}
+	fn fetch(_: &Hash, _: Option<u32>) -> FetchResult {
+		Err(DispatchError::Unavailable)
+	}
+	fn is_requested(_: &Hash) -> bool {
+		false
+	}
+	fn request(_: &Hash) {}
+	fn unrequest(_: &Hash) {}
+}
+
+impl StorePreimage for () {
+	const MAX_LENGTH: usize = 0;
+	fn note(_: Cow<[u8]>) -> Result<Hash, DispatchError> {
+		Err(DispatchError::Exhausted)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::{bounded_vec, BoundedVec};
+
+	#[test]
+	fn bounded_size_is_correct() {
+		assert_eq!(<Bounded<Vec<u8>> as MaxEncodedLen>::max_encoded_len(), 131);
+	}
+
+	#[test]
+	fn bounded_basic_works() {
+		let data: BoundedVec<u8, ConstU32<128>> = bounded_vec![b'a', b'b', b'c'];
+		let len = data.len() as u32;
+		let hash = blake2_256(&data).into();
+
+		// Inline works
+		{
+			let bound: Bounded<Vec<u8>> = Bounded::Inline(data.clone());
+			assert_eq!(bound.hash(), hash);
+			assert_eq!(bound.len(), Some(len));
+			assert!(!bound.lookup_needed());
+			assert_eq!(bound.lookup_len(), None);
+		}
+		// Legacy works
+		{
+			let bound: Bounded<Vec<u8>> = Bounded::Legacy { hash, dummy: Default::default() };
+			assert_eq!(bound.hash(), hash);
+			assert_eq!(bound.len(), None);
+			assert!(bound.lookup_needed());
+			assert_eq!(bound.lookup_len(), Some(1_000_000));
+		}
+		// Lookup works
+		{
+			let bound: Bounded<Vec<u8>> = Bounded::Lookup { hash, len: data.len() as u32 };
+			assert_eq!(bound.hash(), hash);
+			assert_eq!(bound.len(), Some(len));
+			assert!(bound.lookup_needed());
+			assert_eq!(bound.lookup_len(), Some(len));
+		}
+	}
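The tests above exercise the data type itself; the intended end-to-end flow is `bound` on the way into storage and `peek`/`realize` on the way out. A hedged sketch of that round trip against the no-op `()` provider defined above, which stores nothing and therefore only succeeds for values that fit inline (the `demo` function and its values are illustrative):

```rust
// Round trip for a small value through the preimage traits, using the
// `frame_support::traits` re-exports added by this patch.
use frame_support::traits::{QueryPreimage, StorePreimage};
use sp_runtime::DispatchError;

fn demo() -> Result<(), DispatchError> {
	// Anything whose encoding fits in 128 bytes stays `Inline`, so even the
	// storage-less `()` provider can bound and peek it.
	let value: Vec<u8> = b"remark".to_vec();
	let bounded = <() as StorePreimage>::bound(value.clone())?;
	assert!(!bounded.lookup_needed());

	// `peek` decodes it back; the `None` length means no lookup was needed.
	let (decoded, len) = <() as QueryPreimage>::peek(&bounded)?;
	assert_eq!(decoded, value);
	assert_eq!(len, None);

	// A large value needs a real provider: `()` refuses with `Exhausted`.
	let too_big = vec![0u8; 1024];
	assert!(<() as StorePreimage>::bound(too_big).is_err());
	Ok(())
}
```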
+	#[test]
+	fn bounded_transmuting_works() {
+		let data: BoundedVec<u8, ConstU32<128>> = bounded_vec![b'a', b'b', b'c'];
+
+		// Transmute a `String` into a `&str`.
+		let x: Bounded<String> = Bounded::Inline(data.clone());
+		let y: Bounded<&str> = x.transmute();
+		assert_eq!(y, Bounded::Inline(data));
+	}
+}
diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs
index 0dbbbd9e2a553..b8e6a7f807904 100644
--- a/frame/support/src/traits/schedule.rs
+++ b/frame/support/src/traits/schedule.rs
@@ -17,6 +17,8 @@
 
 //! Traits and associated utilities for scheduling dispatchables in FRAME.
 
+#[allow(deprecated)]
+use super::PreimageProvider;
 use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen};
 use scale_info::TypeInfo;
 use sp_runtime::{traits::Saturating, DispatchError, RuntimeDebug};
@@ -128,6 +130,7 @@ impl<T, Hash> MaybeHashed<T, Hash> {
 	}
 }
 
+// TODO: deprecate
 pub mod v1 {
 	use super::*;
 
@@ -283,6 +286,7 @@ pub mod v1 {
 	}
 }
 
+// TODO: deprecate
 pub mod v2 {
 	use super::*;
 
@@ -375,6 +379,97 @@ pub mod v2 {
 	}
 }
 
-pub use v1::*;
+pub mod v3 {
+	use super::*;
+	use crate::traits::Bounded;
 
-use super::PreimageProvider;
+	/// A type that can be used as a scheduler.
+	pub trait Anon<BlockNumber, Call, Origin> {
+		/// An address which can be used for removing a scheduled task.
+		type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + Debug + TypeInfo;
+
+		/// Schedule a dispatch to happen at the beginning of some block in the future.
+		///
+		/// This is not named.
+		fn schedule(
+			when: DispatchTime<BlockNumber>,
+			maybe_periodic: Option<Period<BlockNumber>>,
+			priority: Priority,
+			origin: Origin,
+			call: Bounded<Call>,
+		) -> Result<Self::Address, DispatchError>;
+
+		/// Cancel a scheduled task. If periodic, then it will cancel all further instances of that,
+		/// also.
+		///
+		/// Will return an `Unavailable` error if the `address` is invalid.
+		///
+		/// NOTE: This is guaranteed to work only *before* the point that it is due to be executed.
+		/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
+		///
+		/// NOTE2: This will not work to cancel periodic tasks after their initial execution. For
+		/// that, you must name the task explicitly using the `Named` trait.
+		fn cancel(address: Self::Address) -> Result<(), DispatchError>;
+
+		/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
+		/// only if it is executed *before* the currently scheduled block. For periodic tasks,
+		/// this dispatch is guaranteed to succeed only before the *initial* execution; for
+		/// others, use `reschedule_named`.
+		///
+		/// Will return an `Unavailable` error if the `address` is invalid.
+		fn reschedule(
+			address: Self::Address,
+			when: DispatchTime<BlockNumber>,
+		) -> Result<Self::Address, DispatchError>;
+
+		/// Return the next dispatch time for a given task.
+		///
+		/// Will return an `Unavailable` error if the `address` is invalid.
+		fn next_dispatch_time(address: Self::Address) -> Result<BlockNumber, DispatchError>;
+	}
+
+	pub type TaskName = [u8; 32];
+
+	/// A type that can be used as a scheduler.
+	pub trait Named<BlockNumber, Call, Origin> {
+		/// An address which can be used for removing a scheduled task.
+		type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug;
+
+		/// Schedule a dispatch to happen at the beginning of some block in the future.
+		///
+		/// - `id`: The identity of the task. This must be unique and will return an error if not.
+		fn schedule_named(
+			id: TaskName,
+			when: DispatchTime<BlockNumber>,
+			maybe_periodic: Option<Period<BlockNumber>>,
+			priority: Priority,
+			origin: Origin,
+			call: Bounded<Call>,
+		) -> Result<Self::Address, DispatchError>;
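Clients are expected to drive the v3 API in two steps: bound the call first, then hand the `Bounded` value to the scheduler. A minimal sketch of that flow; `schedule_once`, `Sched` and `Store` are hypothetical names for illustration, while `LOWEST_PRIORITY` and `DispatchTime` are the scheduler module's existing items:

```rust
// Hedged sketch of pallet-side code against the v3 scheduler API.
use frame_support::traits::{
	schedule::{v3::Anon, DispatchTime, LOWEST_PRIORITY},
	Bounded, StorePreimage,
};
use sp_runtime::DispatchError;

fn schedule_once<BlockNumber, Call, Origin, Sched, Store>(
	when: BlockNumber,
	origin: Origin,
	call: Call,
) -> Result<Sched::Address, DispatchError>
where
	Call: codec::Encode,
	Sched: Anon<BlockNumber, Call, Origin>,
	Store: StorePreimage,
{
	// Small calls stay inline; larger ones are noted with the preimage
	// provider and referenced by hash + length.
	let bounded: Bounded<Call> = Store::bound(call)?;
	// One-off task (no period), lowest urgency.
	Sched::schedule(DispatchTime::At(when), None, LOWEST_PRIORITY, origin, bounded)
}
```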
+		/// Cancel a scheduled, named task. If periodic, then it will cancel all further instances
+		/// of that, also.
+		///
+		/// Will return an `Unavailable` error if the `id` is invalid.
+		///
+		/// NOTE: This is guaranteed to work only *before* the point that it is due to be executed.
+		/// If it ends up being delayed beyond the point of execution, then it cannot be cancelled.
+		fn cancel_named(id: TaskName) -> Result<(), DispatchError>;
+
+		/// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed
+		/// only if it is executed *before* the currently scheduled block.
+		///
+		/// Will return an `Unavailable` error if the `id` is invalid.
+		fn reschedule_named(
+			id: TaskName,
+			when: DispatchTime<BlockNumber>,
+		) -> Result<Self::Address, DispatchError>;
+
+		/// Return the next dispatch time for a given task.
+		///
+		/// Will return an `Unavailable` error if the `id` is invalid.
+		fn next_dispatch_time(id: TaskName) -> Result<BlockNumber, DispatchError>;
+	}
+}
+
+pub use v1::*;
diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
index b0716d569409c..b8a9a1128d669 100644
--- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr
@@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
  = note: required because of the requirements on the impl of `FullEncode` for `Bar`
  = note: required because of the requirements on the impl of `FullCodec` for `Bar`
  = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
@@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied
   (A, B, C, D)
   (A, B, C, D, E)
   (A, B, C, D, E, F)
-  and 160 others
+  and 161 others
  = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar`
  = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
@@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
  = note: required because of the requirements on the impl of `FullEncode` for `Bar`
  = note: required because of the requirements on the impl of `FullCodec` for `Bar`
  = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
index 926dc92530659..5032f63bc1b1b 100644
--- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr
@@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
  = note: required because of the requirements on the impl of `FullEncode` for `Bar`
  = note: required because of the requirements on the impl of `FullCodec` for `Bar`
  = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
@@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied
   (A, B, C, D)
   (A, B, C, D, E)
   (A, B, C, D, E, F)
-  and 160 others
+  and 161 others
  = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar`
  = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
@@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied
             <&[(T,)] as EncodeLike>>
             <&[(T,)] as EncodeLike>>
             <&[T] as EncodeLike>>
-           and 279 others
+           and 280 others
  = note: required because of the requirements on the impl of `FullEncode` for `Bar`
  = note: required because of the requirements on the impl of `FullCodec` for `Bar`
  = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
index 563190a06f76f..8d3d7a71a313e 100644
--- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr
@@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
-  and 77 others
+  and 78 others
  = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>`
diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
index c10005223b674..ebf24a1232e3c 100644
--- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
+++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr
@@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5)
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6)
   (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7)
-  and 77 others
+  and 78 others
  = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key`
  = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>`
diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs
index dc74157da79de..7577d0dc6b158 100644
--- a/frame/system/src/lib.rs
+++ b/frame/system/src/lib.rs
@@ -222,7 +222,10 @@ pub mod pallet {
 			+ OriginTrait;
 
 		/// The aggregated `RuntimeCall` type.
-		type RuntimeCall: Dispatchable<RuntimeOrigin = Self::RuntimeOrigin> + Debug;
+		type RuntimeCall: Parameter
+			+ Dispatchable<RuntimeOrigin = Self::RuntimeOrigin>
+			+ Debug
+			+ From<Call<Self>>;
 
 		/// Account index (aka nonce) type. This stores the number of previous transactions
 		/// associated with a sender account.
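The new `From<Call<Self>>` bound means every runtime's aggregated call enum must be constructible from a bare `frame_system` call, which is what lets generic code build system calls without naming a concrete runtime. A hand-rolled sketch of the shape `construct_runtime!` generates to satisfy this; the single-pallet enums here are illustrative only:

```rust
// Illustrative shape of the aggregated call enum behind the new bound;
// the real thing is derived by `construct_runtime!`.
enum SystemCall {
	Remark { remark: Vec<u8> },
}

enum RuntimeCall {
	System(SystemCall),
	// ... one variant per pallet
}

impl From<SystemCall> for RuntimeCall {
	fn from(c: SystemCall) -> Self {
		RuntimeCall::System(c)
	}
}

fn main() {
	// Generic code can now go from a pallet-level call to the runtime call:
	let call: RuntimeCall = SystemCall::Remark { remark: b"hi".to_vec() }.into();
	assert!(matches!(call, RuntimeCall::System(_)));
}
```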
diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index 44aea86be6f19..d4446cb8031ab 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -96,7 +96,6 @@ impl pallet_preimage::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<{ 4096 * 1024 }>; // PreimageMaxSize Taken from Polkadot as reference. type BaseDeposit = ConstU64<1>; type ByteDeposit = ConstU64<1>; type WeightInfo = (); diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs index 85f2bed316793..1832e43e8646c 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -276,6 +276,14 @@ impl<'a, T, S> sp_std::iter::IntoIterator for BoundedSlice<'a, T, S> { } } +impl<'a, T, S: Get> BoundedSlice<'a, T, S> { + /// Create an instance from the first elements of the given slice (or all of it if it is smaller + /// than the length bound). + pub fn truncate_from(s: &'a [T]) -> Self { + Self(&s[0..(s.len().min(S::get() as usize))], PhantomData) + } +} + impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; @@ -620,12 +628,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if `index > len`. - pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), T> { if self.len() < Self::bound() { self.0.insert(index, element); Ok(()) } else { - Err(()) + Err(element) } } @@ -635,12 +643,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if the new capacity exceeds isize::MAX bytes. - pub fn try_push(&mut self, element: T) -> Result<(), ()> { + pub fn try_push(&mut self, element: T) -> Result<(), T> { if self.len() < Self::bound() { self.0.push(element); Ok(()) } else { - Err(()) + Err(element) } } } @@ -673,13 +681,13 @@ where } impl> TryFrom> for BoundedVec { - type Error = (); + type Error = Vec; fn try_from(t: Vec) -> Result { if t.len() <= Self::bound() { // explicit check just above Ok(Self::unchecked_from(t)) } else { - Err(()) + Err(t) } } } @@ -886,6 +894,16 @@ pub mod test { use super::*; use crate::{bounded_vec, ConstU32}; + #[test] + fn slice_truncate_from_works() { + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4, 5]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(bounded.deref(), &[1, 2, 3]); + } + #[test] fn slide_works() { let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 8017a6ac529a2..96706dd919650 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -547,6 +547,12 @@ pub enum DispatchError { /// The number of transactional layers has been reached, or we are not in a transactional /// layer. Transactional(TransactionalError), + /// Resources exhausted, e.g. attempt to read/write data which is too large to manipulate. + Exhausted, + /// The state is corrupt; this is generally not going to fix itself. + Corruption, + /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. 
+ Unavailable, } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -671,18 +677,21 @@ impl From<&'static str> for DispatchError { impl From for &'static str { fn from(err: DispatchError) -> &'static str { + use DispatchError::*; match err { - DispatchError::Other(msg) => msg, - DispatchError::CannotLookup => "Cannot lookup", - DispatchError::BadOrigin => "Bad origin", - DispatchError::Module(ModuleError { message, .. }) => - message.unwrap_or("Unknown module error"), - DispatchError::ConsumerRemaining => "Consumer remaining", - DispatchError::NoProviders => "No providers", - DispatchError::TooManyConsumers => "Too many consumers", - DispatchError::Token(e) => e.into(), - DispatchError::Arithmetic(e) => e.into(), - DispatchError::Transactional(e) => e.into(), + Other(msg) => msg, + CannotLookup => "Cannot lookup", + BadOrigin => "Bad origin", + Module(ModuleError { message, .. }) => message.unwrap_or("Unknown module error"), + ConsumerRemaining => "Consumer remaining", + NoProviders => "No providers", + TooManyConsumers => "Too many consumers", + Token(e) => e.into(), + Arithmetic(e) => e.into(), + Transactional(e) => e.into(), + Exhausted => "Resources exhausted", + Corruption => "State corrupt", + Unavailable => "Resource unavailable", } } } @@ -698,33 +707,37 @@ where impl traits::Printable for DispatchError { fn print(&self) { + use DispatchError::*; "DispatchError".print(); match self { - Self::Other(err) => err.print(), - Self::CannotLookup => "Cannot lookup".print(), - Self::BadOrigin => "Bad origin".print(), - Self::Module(ModuleError { index, error, message }) => { + Other(err) => err.print(), + CannotLookup => "Cannot lookup".print(), + BadOrigin => "Bad origin".print(), + Module(ModuleError { index, error, message }) => { index.print(); error.print(); if let Some(msg) = message { msg.print(); } }, - Self::ConsumerRemaining => "Consumer remaining".print(), - Self::NoProviders => "No providers".print(), - Self::TooManyConsumers => "Too many consumers".print(), - Self::Token(e) => { + ConsumerRemaining => "Consumer remaining".print(), + NoProviders => "No providers".print(), + TooManyConsumers => "Too many consumers".print(), + Token(e) => { "Token error: ".print(); <&'static str>::from(*e).print(); }, - Self::Arithmetic(e) => { + Arithmetic(e) => { "Arithmetic error: ".print(); <&'static str>::from(*e).print(); }, - Self::Transactional(e) => { + Transactional(e) => { "Transactional error: ".print(); <&'static str>::from(*e).print(); }, + Exhausted => "Resources exhausted".print(), + Corruption => "State corrupt".print(), + Unavailable => "Resource unavailable".print(), } } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index a64e3f25ef041..3db0e5510057b 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -37,8 +37,9 @@ use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; use frame_support::{ + dispatch::RawOrigin, parameter_types, - traits::{ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, + traits::{CallerTrait, ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, weights::{RuntimeDbWeight, Weight}, }; use frame_system::limits::{BlockLength, BlockWeights}; @@ -119,7 +120,7 @@ pub fn native_version() -> NativeVersion { } /// Calls in transactions. 
-#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct Transfer { pub from: AccountId, pub to: AccountId, @@ -150,7 +151,7 @@ impl Transfer { } /// Extrinsic for test-runtime. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum Extrinsic { AuthoritiesChange(Vec), Transfer { @@ -446,11 +447,22 @@ impl GetRuntimeBlockType for Runtime { #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq, TypeInfo, MaxEncodedLen)] pub struct RuntimeOrigin; -impl From> for RuntimeOrigin { - fn from(_o: frame_system::Origin) -> Self { +impl From::AccountId>> for RuntimeOrigin { + fn from(_: RawOrigin<::AccountId>) -> Self { unimplemented!("Not required in tests!") } } + +impl CallerTrait<::AccountId> for RuntimeOrigin { + fn into_system(self) -> Option::AccountId>> { + unimplemented!("Not required in tests!") + } + + fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { + unimplemented!("Not required in tests!") + } +} + impl From for Result, RuntimeOrigin> { fn from(_origin: RuntimeOrigin) -> Result, RuntimeOrigin> { unimplemented!("Not required in tests!") @@ -482,6 +494,10 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { unimplemented!("Not required in tests!") } + fn into_caller(self) -> Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -501,6 +517,9 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } + fn as_system_ref(&self) -> Option<&RawOrigin> { + unimplemented!("Not required in tests!") + } } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] @@ -583,6 +602,12 @@ parameter_types! 
{ BlockWeights::with_sensible_defaults(Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75)); } +impl From> for Extrinsic { + fn from(_: frame_system::Call) -> Self { + unimplemented!("Not required in tests!") + } +} + impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; From fc67cbb66d8c484bc7b7506fc1300344d12ecbad Mon Sep 17 00:00:00 2001 From: Andronik Date: Wed, 5 Oct 2022 23:07:15 +0200 Subject: [PATCH 45/75] update kvdb & co (#12312) * upgrade kvdb & co * remove patch * update Cargo.lock * upgrade impl-serde * fix parsing test * actually fix it * FFS --- Cargo.lock | 89 +++++++++---------------- bin/node/bench/Cargo.toml | 6 +- bin/node/bench/src/tempdb.rs | 13 ++-- bin/node/inspect/src/lib.rs | 4 +- client/db/Cargo.toml | 8 +-- client/db/src/upgrade.rs | 9 ++- client/informant/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- frame/support/Cargo.toml | 2 +- primitives/arithmetic/Cargo.toml | 2 +- primitives/arithmetic/fuzzer/Cargo.toml | 2 +- primitives/core/Cargo.toml | 6 +- primitives/core/src/uint.rs | 3 - primitives/database/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/storage/Cargo.toml | 2 +- primitives/test-primitives/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 4 +- primitives/version/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 4 +- utils/frame/benchmarking-cli/Cargo.toml | 4 +- 24 files changed, 73 insertions(+), 103 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53f370a930626..52b74628f07d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -551,19 +551,18 @@ checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" [[package]] name = "bincode" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "byteorder", "serde", ] [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -778,9 +777,9 @@ dependencies = [ [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -981,7 +980,7 @@ checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" dependencies = [ "glob", "libc", - "libloading 0.7.0", + "libloading", ] [[package]] @@ -2063,9 +2062,9 @@ dependencies = [ [[package]] name = "fixed-hash" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", "rand 0.8.5", @@ -2442,18 +2441,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "fs-swap" -version = "0.2.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" -dependencies = [ - "lazy_static", - "libc", - "libloading 0.5.2", - "winapi", -] - [[package]] name = "fs2" version = "0.4.3" @@ -3055,9 +3042,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ "serde", ] @@ -3449,9 +3436,9 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" +checksum = "585089ceadba0197ffe9af6740ab350b325e3c1f5fccfbc3522e0250c750409b" dependencies = [ "parity-util-mem", "smallvec", @@ -3459,9 +3446,9 @@ dependencies = [ [[package]] name = "kvdb-memorydb" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece7e668abd21387aeb6628130a6f4c802787f014fa46bc83221448322250357" +checksum = "40d109c87bfb7759edd2a49b2649c1afe25af785d930ad6a38479b4dc70dd873" dependencies = [ "kvdb", "parity-util-mem", @@ -3470,15 +3457,13 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca7fbdfd71cd663dceb0faf3367a99f8cf724514933e9867cec4995b6027cbc1" +checksum = "c076cc2cdbac89b9910c853a36c957d3862a779f31c2661174222cefb49ee597" dependencies = [ - "fs-swap", "kvdb", "log", "num_cpus", - "owning_ref", "parity-util-mem", "parking_lot 0.12.1", "regex", @@ -3522,16 +3507,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "libloading" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" -dependencies = [ - "cc", - "winapi", -] - [[package]] name = "libloading" version = "0.7.0" @@ -4065,9 +4040,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.6.1+6.28.2" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", "bzip2-sys", @@ -4364,9 +4339,9 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" +checksum = "34ac11bb793c28fa095b7554466f53b3a60a2cd002afdac01bcf135cbd73a269" dependencies = [ "hash-db", "hashbrown 0.12.3", @@ -6637,9 +6612,9 @@ checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" [[package]] name = "parity-util-mem" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" +checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ "cfg-if 1.0.0", "hashbrown 0.12.3", @@ -7011,9 +6986,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.11.1" +version = "0.12.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a" dependencies = [ "fixed-hash", "impl-codec", @@ -7637,9 +7612,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -10938,9 +10913,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.2+5.2.1-patched.2" +version = "0.5.1+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +checksum = "931e876f91fed0827f863a2d153897790da0b24d882c721a79cb3beb0b903261" dependencies = [ "cc", "fs_extra", @@ -11203,9 +11178,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5704f0d6130bd83608e4370c19e20c8a6ec03e80363e493d0234efca005265a" +checksum = "f0dae77b1daad50cd3ed94c506d2dab27e2e47f7b5153a6d4b1992bb3f6028cb" dependencies = [ "criterion", "hash-db", @@ -11792,7 +11767,7 @@ dependencies = [ "enum-iterator", "enumset", "leb128", - "libloading 0.7.0", + "libloading", "loupe", "object 0.28.3", "rkyv", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index a9c367ae8aa3d..42953da837100 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -23,8 +23,8 @@ sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machi serde = "1.0.136" serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } -kvdb = "0.11.0" -kvdb-rocksdb = "0.15.1" +kvdb = "0.12.0" +kvdb-rocksdb = "0.16.0" sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } @@ -37,7 +37,7 @@ tempfile = "3.1.0" fs_extra = "1" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.3" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 22c5980fd6524..eb3bb1d3fccd7 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use kvdb::{DBTransaction, KeyValueDB}; +use kvdb::{DBKeyValue, DBTransaction, KeyValueDB}; use kvdb_rocksdb::{Database, DatabaseConfig}; use std::{io, path::PathBuf, sync::Arc}; @@ -38,7 +38,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Get a value by partial key. Only works for flushed data. 
- fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result>> { unimplemented!() } @@ -56,7 +56,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Iterate over flushed data for a given column. - fn iter<'a>(&'a self, _col: u32) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, _col: u32) -> Box> + 'a> { unimplemented!() } @@ -65,12 +65,7 @@ impl KeyValueDB for ParityDbWrapper { &'a self, _col: u32, _prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a> { - unimplemented!() - } - - /// Attempt to replace this database with a new one located at the given path. - fn restore(&self, _new_db: &str) -> io::Result<()> { + ) -> Box> + 'a> { unimplemented!() } } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index b37c5aa7ca2e8..aacae0ff7a0d9 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -296,7 +296,7 @@ mod tests { let b2 = ExtrinsicAddress::from_str("0 0"); let b3 = ExtrinsicAddress::from_str("0x0012345f"); - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); + assert_eq!(e0, Ok(ExtrinsicAddress::Bytes(vec![0x12, 0x34]))); assert_eq!( b0, Ok(ExtrinsicAddress::Block( @@ -305,7 +305,7 @@ mod tests { )) ); assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); + assert_eq!(b2, Ok(ExtrinsicAddress::Bytes(vec![0, 0]))); assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); } } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 7f564ae642433..b21038b77564f 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -17,9 +17,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } hash-db = "0.15.2" -kvdb = "0.11.0" -kvdb-memorydb = "0.11.0" -kvdb-rocksdb = { version = "0.15.2", optional = true } +kvdb = "0.12.0" +kvdb-memorydb = "0.12.0" +kvdb-rocksdb = { version = "0.16.0", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" parity-db = "0.3.16" @@ -36,7 +36,7 @@ sp-trie = { version = "6.0.0", path = "../../primitives/trie" } [dev-dependencies] criterion = "0.3.3" -kvdb-rocksdb = "0.15.1" +kvdb-rocksdb = "0.16.0" rand = "0.8.4" tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 292905663a20b..51750bf689759 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -115,7 +115,7 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> Upgra /// 2) transactions column is added; fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; + let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } @@ -126,7 +126,10 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr let db = Database::open(&db_cfg, db_path)?; // Get all the keys we need to update - let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect(); + let keys: Vec<_> = db + .iter(columns::JUSTIFICATIONS) + .map(|r| r.map(|e| e.0)) + .collect::>()?; // Read and update each entry let mut transaction = db.transaction(); @@ -152,7 +155,7 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr /// 2) BODY_INDEX column is added; fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = 
DatabaseConfig::with_columns(V3_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; + let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index b3ac5d892fd58..073199d005fd1 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -17,7 +17,7 @@ ansi_term = "0.12.1" futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e46c65cf018f5..42bd2ae7276e2 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -80,7 +80,7 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -parity-util-mem = { version = "0.11.0", default-features = false, features = [ +parity-util-mem = { version = "0.12.0", default-features = false, features = [ "primitive-types", ] } async-trait = "0.1.57" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 4243968ec79b4..7f9a502aef8e9 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index cd7cb297e8c4a..5e005f5523ae8 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -18,7 +18,7 @@ futures = "0.3.21" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 98db3aa6aa49d..be9cb1f1bf316 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -44,7 +44,7 @@ serde_json = "1.0.85" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } [features] default = ["std"] diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index d7046b3254699..60eac2247e830 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", 
default-features = false, path = "../std" } [dev-dependencies] criterion = "0.3" -primitive-types = "0.11.1" +primitive-types = "0.12.0" sp-core = { version = "6.0.0", features = ["full_crypto"], path = "../core" } rand = "0.7.2" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 990106f990323..b1ffa746b6b63 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5.49" num-bigint = "0.2" -primitive-types = "0.11.1" +primitive-types = "0.12.0" sp-arithmetic = { version = "5.0.0", path = ".." } [[bin]] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 65d989a74c030..b7bc6dfdce496 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -21,8 +21,8 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.11.1", default-features = false, features = ["codec", "scale-info"] } -impl-serde = { version = "0.3.0", optional = true } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } +impl-serde = { version = "0.4.0", optional = true } wasmi = { version = "0.13", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -40,7 +40,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index f4eb3a19ac36c..4bf914bde2ee1 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -35,7 +35,6 @@ mod tests { ($name::from(2), "0x2"), ($name::from(10), "0xa"), ($name::from(15), "0xf"), - ($name::from(15), "0xf"), ($name::from(16), "0x10"), ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), @@ -52,8 +51,6 @@ mod tests { assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } }; } diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index a3f09536f4f5c..f19a647fed032 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -11,5 +11,5 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -kvdb = "0.11.0" +kvdb = "0.12.0" parking_lot = "0.12.1" diff --git a/primitives/runtime-interface/Cargo.toml 
b/primitives/runtime-interface/Cargo.toml index 51367d40d0cd0..e7f0cee3f140f 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" -primitive-types = { version = "0.11.1", default-features = false } +primitive-types = { version = "0.12.0", default-features = false } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } impl-trait-for-tuples = "0.2.2" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 1d1a7a2c38b1d..01594ed69e312 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -19,7 +19,7 @@ either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } paste = "1.0" rand = { version = "0.7.2", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index b37a4eb4b331d..d04a88d129d34 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.3.1", optional = true } +impl-serde = { version = "0.4.0", optional = true } ref-cast = "1.0.0" serde = { version = "1.0.136", features = ["derive"], optional = true } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 2a20addf66b2b..28fa6e6213daf 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 6d2d57b590e6a..2636648f40387 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -24,7 +24,7 @@ hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } lazy_static = { version = "1.4.0", optional = true } lru = { version = "0.7.5", optional = true } -memory-db = { version = "0.29.0", default-features = false } +memory-db = { version = "0.30.0", default-features = 
false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -38,7 +38,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] array-bytes = "4.1" criterion = "0.3.3" -trie-bench = "0.31.0" +trie-bench = "0.32.0" trie-standardmap = "0.15.2" sp-runtime = { version = "6.0.0", path = "../runtime" } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 1750ebb8cd90b..0dcbbd81fd93f 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.3.1", optional = true } +impl-serde = { version = "0.4.0", optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 744cc527e6012..698351cd69f64 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "6.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.29.0", default-features = false } +memory-db = { version = "0.30.0", default-features = false } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -42,7 +42,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = sp-trie = { version = "6.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.24.0", default-features = false } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.12.0", default-features = false, path = "../../primitives/state-machine" } sp-externalities = { version = "0.12.0", default-features = false, path = "../../primitives/externalities" } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 108d0d338c2b3..8eedcb870a3d0 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -22,11 +22,11 @@ handlebars = "4.2.2" hash-db = "0.15.2" Inflector = "0.11.4" itertools = "0.10.3" -kvdb = "0.11.0" +kvdb = "0.12.0" lazy_static = "1.4.0" linked-hash-map = "0.5.4" log = "0.4.17" -memory-db = "0.29.0" +memory-db = "0.30.0" rand = { version = 
"0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.136" From f447beec6eefbf452520b90cb0d199eaaf114342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 6 Oct 2022 10:11:53 +0200 Subject: [PATCH 46/75] Use `Option` for contract dry-runs (#12429) --- Cargo.lock | 14 +---- Cargo.toml | 1 - bin/node/runtime/Cargo.toml | 2 - bin/node/runtime/src/lib.rs | 15 +++-- frame/contracts/Cargo.toml | 1 + frame/contracts/runtime-api/Cargo.toml | 34 ----------- frame/contracts/runtime-api/README.md | 7 --- frame/contracts/runtime-api/src/lib.rs | 85 -------------------------- frame/contracts/src/lib.rs | 57 ++++++++++++++++- 9 files changed, 65 insertions(+), 151 deletions(-) delete mode 100644 frame/contracts/runtime-api/Cargo.toml delete mode 100644 frame/contracts/runtime-api/README.md delete mode 100644 frame/contracts/runtime-api/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 52b74628f07d3..9136df1cd9299 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3358,7 +3358,6 @@ dependencies = [ "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", - "pallet-contracts-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -5487,6 +5486,7 @@ dependencies = [ "scale-info", "serde", "smallvec", + "sp-api", "sp-core", "sp-io", "sp-keystore", @@ -5518,18 +5518,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pallet-contracts-runtime-api" -version = "4.0.0-dev" -dependencies = [ - "pallet-contracts-primitives", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 02bc6aede8669..e203cbbee7e0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,7 +87,6 @@ members = [ "frame/collective", "frame/contracts", "frame/contracts/primitives", - "frame/contracts/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/fast-unstake", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ac3afc19da50f..6940e968e28e7 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -62,7 +62,6 @@ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } -pallet-contracts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } @@ -139,7 +138,6 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", - "pallet-contracts-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d10448cc2d183..4a35b972ff7de 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1939,33 
+1939,32 @@ impl_runtime_apis! {
 		}
 	}
 
-	impl pallet_contracts_runtime_api::ContractsApi<
-		Block, AccountId, Balance, BlockNumber, Hash,
-	>
-		for Runtime
+	impl pallet_contracts::ContractsApi<Block, AccountId, Balance, BlockNumber, Hash> for Runtime
 	{
 		fn call(
 			origin: AccountId,
 			dest: AccountId,
 			value: Balance,
-			gas_limit: u64,
+			gas_limit: Option<Weight>,
 			storage_deposit_limit: Option<Balance>,
 			input_data: Vec<u8>,
 		) -> pallet_contracts_primitives::ContractExecResult<Balance> {
-			Contracts::bare_call(origin, dest, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, input_data, true)
+			let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block);
+			Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true)
 		}
 
 		fn instantiate(
 			origin: AccountId,
 			value: Balance,
-			gas_limit: u64,
+			gas_limit: Option<Weight>,
 			storage_deposit_limit: Option<Balance>,
 			code: pallet_contracts_primitives::Code<Hash>,
 			data: Vec<u8>,
 			salt: Vec<u8>,
 		) -> pallet_contracts_primitives::ContractInstantiateResult<AccountId, Balance> {
-			Contracts::bare_instantiate(origin, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, code, data, salt, true)
+			let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block);
+			Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true)
 		}
 
 		fn upload_code(
diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml
index 7c3b677e06436..7483ec8935890 100644
--- a/frame/contracts/Cargo.toml
+++ b/frame/contracts/Cargo.toml
@@ -38,6 +38,7 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su
 frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" }
 pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" }
 pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" }
+sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" }
 sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" }
 sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" }
 sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" }
diff --git a/frame/contracts/runtime-api/Cargo.toml b/frame/contracts/runtime-api/Cargo.toml
deleted file mode 100644
index 05b0e05d4c568..0000000000000
--- a/frame/contracts/runtime-api/Cargo.toml
+++ /dev/null
@@ -1,34 +0,0 @@
-[package]
-name = "pallet-contracts-runtime-api"
-version = "4.0.0-dev"
-authors = ["Parity Technologies <admin@parity.io>"]
-edition = "2021"
-license = "Apache-2.0"
-homepage = "https://substrate.io"
-repository = "https://github.com/paritytech/substrate/"
-description = "Runtime API definition used to provide dry-run capabilities"
-readme = "README.md"
-
-[package.metadata.docs.rs]
-targets = ["x86_64-unknown-linux-gnu"]
-
-[dependencies]
-codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] }
-scale-info = { version = "2.1.1", default-features = false, features = ["derive"] }
-
-# Substrate Dependencies
-pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../primitives" }
-sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" }
-sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" }
-sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" }
-
-[features]
-default = ["std"]
-std = [
-	"sp-api/std",
-	"codec/std",
-	"scale-info/std",
-	"sp-std/std",
-	"sp-runtime/std",
-	"pallet-contracts-primitives/std",
-]
diff --git a/frame/contracts/runtime-api/README.md b/frame/contracts/runtime-api/README.md
deleted file mode 100644
index fed285b23b2ac..0000000000000
--- a/frame/contracts/runtime-api/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Runtime API definition used to provide dry-run capabilities
-
-This API should be imported and implemented by the runtime,
-of a node that wants to provide clients with dry-run
-capabilities.
-
-License: Apache-2.0
\ No newline at end of file
diff --git a/frame/contracts/runtime-api/src/lib.rs b/frame/contracts/runtime-api/src/lib.rs
deleted file mode 100644
index 79fd20c8c0163..0000000000000
--- a/frame/contracts/runtime-api/src/lib.rs
+++ /dev/null
@@ -1,85 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Runtime API definition used to provide dry-run capabilities.
-//!
-//! This API should be imported and implemented by the runtime,
-//! of a node that wants to provide clients with dry-run
-//! capabilities.
-
-#![cfg_attr(not(feature = "std"), no_std)]
-
-use codec::Codec;
-use pallet_contracts_primitives::{
-	Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, GetStorageResult,
-};
-use sp_std::vec::Vec;
-
-sp_api::decl_runtime_apis! {
-	/// The API to interact with contracts without using executive.
-	pub trait ContractsApi<AccountId, Balance, BlockNumber, Hash> where
-		AccountId: Codec,
-		Balance: Codec,
-		BlockNumber: Codec,
-		Hash: Codec,
-	{
-		/// Perform a call from a specified account to a given contract.
-		///
-		/// See `pallet_contracts::Pallet::call`.
-		fn call(
-			origin: AccountId,
-			dest: AccountId,
-			value: Balance,
-			gas_limit: u64,
-			storage_deposit_limit: Option<Balance>,
-			input_data: Vec<u8>,
-		) -> ContractExecResult<Balance>;
-
-		/// Instantiate a new contract.
-		///
-		/// See `pallet_contracts::Pallet::instantiate`.
-		fn instantiate(
-			origin: AccountId,
-			value: Balance,
-			gas_limit: u64,
-			storage_deposit_limit: Option<Balance>,
-			code: Code<Hash>,
-			data: Vec<u8>,
-			salt: Vec<u8>,
-		) -> ContractInstantiateResult<AccountId, Balance>;
-
-
-		/// Upload new code without instantiating a contract from it.
-		///
-		/// See `pallet_contracts::Pallet::upload_code`.
-		fn upload_code(
-			origin: AccountId,
-			code: Vec<u8>,
-			storage_deposit_limit: Option<Balance>,
-		) -> CodeUploadResult<Hash, Balance>;
-
-		/// Query a given storage key in a given contract.
-		///
-		/// Returns `Ok(Some(Vec<u8>))` if the storage value exists under the given key in the
-		/// specified account and `Ok(None)` if it doesn't. If the account specified by the address
-		/// doesn't exist, or doesn't have a contract then `Err` is returned.
-		fn get_storage(
-			address: AccountId,
-			key: Vec<u8>,
-		) -> GetStorageResult;
-	}
-}
diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs
index 0c90c3ff433b4..794b172cc6282 100644
--- a/frame/contracts/src/lib.rs
+++ b/frame/contracts/src/lib.rs
@@ -105,7 +105,7 @@ use crate::{
 	wasm::{OwnerInfo, PrefabWasmModule},
 	weights::WeightInfo,
 };
-use codec::{Encode, HasCompact};
+use codec::{Codec, Encode, HasCompact};
 use frame_support::{
 	dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo},
 	ensure,
@@ -1171,3 +1171,58 @@ where
 		Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2)
 	}
 }
+
+sp_api::decl_runtime_apis! {
+	/// The API used to dry-run contract interactions.
+	pub trait ContractsApi<AccountId, Balance, BlockNumber, Hash> where
+		AccountId: Codec,
+		Balance: Codec,
+		BlockNumber: Codec,
+		Hash: Codec,
+	{
+		/// Perform a call from a specified account to a given contract.
+		///
+		/// See [`crate::Pallet::bare_call`].
+		fn call(
+			origin: AccountId,
+			dest: AccountId,
+			value: Balance,
+			gas_limit: Option<Weight>,
+			storage_deposit_limit: Option<Balance>,
+			input_data: Vec<u8>,
+		) -> ContractExecResult<Balance>;
+
+		/// Instantiate a new contract.
+		///
+		/// See [`crate::Pallet::bare_instantiate`].
+		fn instantiate(
+			origin: AccountId,
+			value: Balance,
+			gas_limit: Option<Weight>,
+			storage_deposit_limit: Option<Balance>,
+			code: Code<Hash>,
+			data: Vec<u8>,
+			salt: Vec<u8>,
+		) -> ContractInstantiateResult<AccountId, Balance>;
+
+
+		/// Upload new code without instantiating a contract from it.
+		///
+		/// See [`crate::Pallet::bare_upload_code`].
+		fn upload_code(
+			origin: AccountId,
+			code: Vec<u8>,
+			storage_deposit_limit: Option<Balance>,
+		) -> CodeUploadResult<Hash, Balance>;
+
+		/// Query a given storage key in a given contract.
+		///
+		/// Returns `Ok(Some(Vec<u8>))` if the storage value exists under the given key in the
+		/// specified account and `Ok(None)` if it doesn't. If the account specified by the address
+		/// doesn't exist, or doesn't have a contract then `Err` is returned.
+		fn get_storage(
+			address: AccountId,
+			key: Vec<u8>,
+		) -> GetStorageResult;
+	}
+}

From 3cb5a4069974cdfae3f6aad18626e1dde19ce3fb Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Thu, 6 Oct 2022 12:20:27 +0300
Subject: [PATCH 47/75] Add pluggable BEEFY payload constructors (#12428)

* primitives/beefy: move Payload to its own file

* primitives/beefy: add Payload tests

* primitives/beefy: add MmrRootProvider as custom BEEFY payload provider

* client/beefy: use generic BEEFY 'PayloadProvider'

* primitives/beefy: rename Payload::new to Payload::from_single_entry for clarity

* fix visibility

* fix cargo doc
---
 Cargo.lock                               |   1 +
 client/beefy/rpc/src/lib.rs              |   5 +-
 client/beefy/src/communication/gossip.rs |   8 +-
 client/beefy/src/justification.rs        |   4 +-
 client/beefy/src/lib.rs                  |  13 ++-
 client/beefy/src/tests.rs                |   5 +-
 client/beefy/src/worker.rs               |  90 ++++++------------
 primitives/beefy/Cargo.toml              |   2 +
 primitives/beefy/src/commitment.rs       |  82 ++++--------------
 primitives/beefy/src/lib.rs              |   7 +-
 primitives/beefy/src/mmr.rs              | 105 ++++++++++++++++++++++-
 primitives/beefy/src/payload.rs          | 105 +++++++++++++++++++++++
 primitives/beefy/src/witness.rs          |   8 +-
 13 files changed, 284 insertions(+), 151 deletions(-)
 create mode 100644 primitives/beefy/src/payload.rs

diff --git a/Cargo.lock b/Cargo.lock
index 9136df1cd9299..a6a7c09514325 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -539,6 +539,7 @@ dependencies = [
  "sp-application-crypto",
  "sp-core",
  "sp-keystore",
+ "sp-mmr-primitives",
  "sp-runtime",
  "sp-std",
 ]
diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs
index 6f21abc616db8..d29ed433c38db 100644
--- a/client/beefy/rpc/src/lib.rs
+++ b/client/beefy/rpc/src/lib.rs
@@ -170,7 +170,7 @@ mod tests {
 		communication::notification::BeefyVersionedFinalityProofSender,
 		justification::BeefyVersionedFinalityProof,
 	};
-	use beefy_primitives::{known_payload_ids, Payload, SignedCommitment};
+	use beefy_primitives::{known_payloads, Payload, SignedCommitment};
 	use codec::{Decode, Encode};
 	use jsonrpsee::{types::EmptyParams, RpcModule};
 	use sp_runtime::traits::{BlakeTwo256, Hash};
@@ -266,7 +266,8 @@ mod tests {
 	}
 
 	fn create_finality_proof() -> BeefyVersionedFinalityProof<Block> {
-		let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode());
+		let payload =
+			Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode());
 		BeefyVersionedFinalityProof::<Block>::V1(SignedCommitment {
 			commitment: beefy_primitives::Commitment {
 				payload,
diff --git a/client/beefy/src/communication/gossip.rs b/client/beefy/src/communication/gossip.rs
index 6c41a2e48932a..520548b943f96 100644
--- a/client/beefy/src/communication/gossip.rs
+++ b/client/beefy/src/communication/gossip.rs
@@ -237,8 +237,7 @@ mod tests {
 	use crate::keystore::{tests::Keyring, BeefyKeystore};
 
 	use beefy_primitives::{
-		crypto::Signature, known_payload_ids, Commitment, MmrRootHash, Payload, VoteMessage,
-		KEY_TYPE,
+		crypto::Signature, known_payloads, Commitment, MmrRootHash, Payload, VoteMessage, KEY_TYPE,
 	};
 
 	use super::*;
@@ -348,7 +347,10 @@ mod tests {
 	}
 
 	fn dummy_vote(block_number: u64) -> VoteMessage<u64, AuthorityId, Signature> {
-		let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, MmrRootHash::default().encode());
+		let payload = Payload::from_single_entry(
+			known_payloads::MMR_ROOT_ID,
+			MmrRootHash::default().encode(),
+		);
 		let commitment = Commitment { payload, block_number, validator_set_id: 0 };
 		let signature = sign_commitment(&Keyring::Alice, &commitment);
 
diff --git a/client/beefy/src/justification.rs
b/client/beefy/src/justification.rs index d9be18593dac7..7243c692727f0 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -81,7 +81,7 @@ fn verify_with_validator_set( #[cfg(test)] pub(crate) mod tests { use beefy_primitives::{ - known_payload_ids, Commitment, Payload, SignedCommitment, VersionedFinalityProof, + known_payloads, Commitment, Payload, SignedCommitment, VersionedFinalityProof, }; use substrate_test_runtime_client::runtime::Block; @@ -94,7 +94,7 @@ pub(crate) mod tests { keys: &[Keyring], ) -> BeefyVersionedFinalityProof { let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 760fc753b18a3..1c61cac072207 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use beefy_primitives::{BeefyApi, MmrRootHash}; +use beefy_primitives::{BeefyApi, MmrRootHash, PayloadProvider}; use parking_lot::Mutex; use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; @@ -167,11 +167,13 @@ pub struct BeefyNetworkParams { } /// BEEFY gadget initialization parameters. -pub struct BeefyParams { +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend pub backend: Arc, + /// BEEFY Payload provider + pub payload_provider: P, /// Runtime Api Provider pub runtime: Arc, /// Local key store @@ -191,11 +193,12 @@ pub struct BeefyParams { /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. 
-pub async fn start_beefy_gadget(beefy_params: BeefyParams) +pub async fn start_beefy_gadget(beefy_params: BeefyParams) where B: Block, BE: Backend, C: Client + BlockBackend, + P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, @@ -203,6 +206,7 @@ where let BeefyParams { client, backend, + payload_provider, runtime, key_store, network_params, @@ -249,6 +253,7 @@ where let worker_params = worker::WorkerParams { client, backend, + payload_provider, runtime, network, key_store: key_store.into(), @@ -261,7 +266,7 @@ where min_block_delta, }; - let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); + let worker = worker::BeefyWorker::<_, _, _, _, _, _>::new(worker_params); futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 8057bd7cab7a5..24cf89acd5734 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -38,6 +38,7 @@ use sc_utils::notification::NotificationReceiver; use beefy_primitives::{ crypto::{AuthorityId, Signature}, + mmr::MmrRootProvider, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; @@ -372,10 +373,12 @@ where justifications_protocol_name: on_demand_justif_handler.protocol_name(), _phantom: PhantomData, }; + let payload_provider = MmrRootProvider::new(api.clone()); let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), + payload_provider, runtime: api.clone(), key_store: Some(keystore), network_params, @@ -384,7 +387,7 @@ where prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, }; - let task = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} assert_send(&task); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 5bdc72357c412..a21807c8ee875 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -48,7 +48,7 @@ use sp_runtime::{ use beefy_primitives::{ crypto::{AuthorityId, Signature}, - known_payload_ids, BeefyApi, Commitment, ConsensusLog, MmrRootHash, Payload, SignedCommitment, + BeefyApi, Commitment, ConsensusLog, MmrRootHash, Payload, PayloadProvider, SignedCommitment, ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; @@ -194,9 +194,10 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, + pub payload_provider: P, pub runtime: Arc, pub network: N, pub key_store: BeefyKeystore, @@ -210,10 +211,11 @@ pub(crate) struct WorkerParams { } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, + payload_provider: P, runtime: Arc, network: N, key_store: BeefyKeystore, @@ -243,11 +245,12 @@ pub(crate) struct BeefyWorker { voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, + P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, @@ -258,10 +261,11 @@ where /// BEEFY pallet has been deployed on-chain. 
/// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. - pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, + payload_provider, runtime, key_store, network, @@ -282,6 +286,7 @@ where BeefyWorker { client: client.clone(), backend, + payload_provider, runtime, network, known_peers, @@ -299,17 +304,6 @@ where } } - /// Simple wrapper that gets MMR root from header digests or from client state. - fn get_mmr_root_digest(&self, header: &B::Header) -> Option { - find_mmr_root_digest::(header).or_else(|| { - self.runtime - .runtime_api() - .mmr_root(&BlockId::hash(header.hash())) - .ok() - .and_then(|r| r.ok()) - }) - } - /// Verify `active` validator set for `block` against the key store /// /// We want to make sure that we have _at least one_ key in our keystore that @@ -621,13 +615,12 @@ where }; let target_hash = target_header.hash(); - let mmr_root = if let Some(hash) = self.get_mmr_root_digest(&target_header) { + let payload = if let Some(hash) = self.payload_provider.payload(&target_header) { hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); return Ok(()) }; - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, mmr_root.encode()); let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if !rounds.should_self_vote(&(payload.clone(), target_number)) { @@ -917,20 +910,6 @@ where } } -/// Extract the MMR root hash from a digest in the given header, if it exists. -fn find_mmr_root_digest(header: &B::Header) -> Option -where - B: Block, -{ - let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); - - let filter = |log: ConsensusLog| match log { - ConsensusLog::MmrRoot(root) => Some(root), - _ => None, - }; - header.digest().convert_first(|l| l.try_to(id).and_then(filter)) -} - /// Scan the `header` digest log for a BEEFY validator set change. Return either the new /// validator set or `None` in case no validator set change has been signaled. 
fn find_authorities_change(header: &B::Header) -> Option> @@ -1016,8 +995,8 @@ pub(crate) mod tests { BeefyRPCLinks, }; + use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; - use sc_client_api::{Backend as BackendT, HeaderBackend}; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; @@ -1032,7 +1011,14 @@ pub(crate) mod tests { peer: &BeefyPeer, key: &Keyring, min_block_delta: u32, - ) -> BeefyWorker>> { + ) -> BeefyWorker< + Block, + Backend, + PeersFullClient, + MmrRootProvider, + TestApi, + Arc>, + > { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = @@ -1064,9 +1050,11 @@ pub(crate) mod tests { "/beefy/justifs/1".into(), known_peers.clone(), ); + let payload_provider = MmrRootProvider::new(api.clone()); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), + payload_provider, runtime: api, key_store: Some(keystore).into(), known_peers, @@ -1078,7 +1066,7 @@ pub(crate) mod tests { network, on_demand_justifications, }; - BeefyWorker::<_, _, _, _, _>::new(worker_params) + BeefyWorker::<_, _, _, _, _, _>::new(worker_params) } #[test] @@ -1300,30 +1288,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } - #[test] - fn extract_mmr_root_digest() { - let mut header = Header::new( - 1u32.into(), - Default::default(), - Default::default(), - Default::default(), - Digest::default(), - ); - - // verify empty digest shows nothing - assert!(find_mmr_root_digest::(&header).is_none()); - - let mmr_root_hash = H256::random(); - header.digest_mut().push(DigestItem::Consensus( - BEEFY_ENGINE_ID, - ConsensusLog::::MmrRoot(mmr_root_hash).encode(), - )); - - // verify validator set is correctly extracted from digest - let extracted = find_mmr_root_digest::(&header); - assert_eq!(extracted, Some(mmr_root_hash)); - } - #[test] fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; @@ -1363,7 +1327,7 @@ pub(crate) mod tests { let create_finality_proof = |block_num: NumberFor| { let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; @@ -1482,7 +1446,7 @@ pub(crate) mod tests { block_number: NumberFor, ) -> VoteMessage, AuthorityId, Signature> { let commitment = Commitment { - payload: Payload::new(*b"BF", vec![]), + payload: Payload::from_single_entry(*b"BF", vec![]), block_number, validator_set_id: 0, }; @@ -1574,7 +1538,7 @@ pub(crate) mod tests { // import/append BEEFY justification for session boundary block 10 let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: 10, validator_set_id: validator_set.id(), }; @@ -1608,7 +1572,7 @@ pub(crate) mod tests { // import/append BEEFY justification for block 12 let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: 12, validator_set_id: validator_set.id(), }; diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index 1c1afde36ce5c..fe6ce23337c86 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = 
"2.1.1", default-features = false, features = ["derive" sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -33,6 +34,7 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", ] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 0e22c8d56d937..5765ff3609dbb 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -19,61 +19,7 @@ use codec::{Decode, Encode, Error, Input}; use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; -use crate::ValidatorSetId; - -/// Id of different payloads in the [`Commitment`] data -pub type BeefyPayloadId = [u8; 2]; - -/// Registry of all known [`BeefyPayloadId`]. -pub mod known_payload_ids { - use crate::BeefyPayloadId; - - /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. - /// - /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). - pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; -} - -/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. -/// -/// The idea is to store a vector of SCALE-encoded values with an extra identifier. -/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected -/// value. Duplicated identifiers are disallowed. It's okay for different implementations to only -/// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] -pub struct Payload(Vec<(BeefyPayloadId, Vec)>); - -impl Payload { - /// Construct a new payload given an initial vallue - pub fn new(id: BeefyPayloadId, value: Vec) -> Self { - Self(vec![(id, value)]) - } - - /// Returns a raw payload under given `id`. - /// - /// If the [`BeefyPayloadId`] is not found in the payload `None` is returned. - pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec> { - let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?; - Some(&self.0[index].1) - } - - /// Returns a decoded payload value under given `id`. - /// - /// In case the value is not there or it cannot be decoded does not match `None` is returned. - pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { - self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) - } - - /// Push a `Vec` with a given id into the payload vec. - /// This method will internally sort the payload vec after every push. - /// - /// Returns self to allow for daisy chaining. - pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec) -> Self { - self.0.push((id, value)); - self.0.sort_by_key(|(id, _)| *id); - self - } -} +use crate::{Payload, ValidatorSetId}; /// A commitment signed by GRANDPA validators as part of BEEFY protocol. 
/// @@ -302,13 +248,11 @@ impl From> for VersionedFinalityProof { #[cfg(test)] mod tests { - use sp_core::{keccak_256, Pair}; - use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; - use super::*; + use crate::{crypto, known_payloads, KEY_TYPE}; use codec::Decode; - - use crate::{crypto, KEY_TYPE}; + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; @@ -341,7 +285,8 @@ mod tests { #[test] fn commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -362,7 +307,8 @@ mod tests { #[test] fn signed_commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -396,7 +342,8 @@ mod tests { #[test] fn signed_commitment_count_signatures() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -421,7 +368,8 @@ mod tests { block_number: u128, validator_set_id: crate::ValidatorSetId, ) -> TestCommitment { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); Commitment { payload, block_number, validator_set_id } } @@ -441,7 +389,8 @@ mod tests { #[test] fn versioned_commitment_encode_decode() { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -467,7 +416,8 @@ mod tests { #[test] fn large_signed_commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs index 87f1b8756af65..705366e1b4778 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -33,12 +33,11 @@ mod commitment; pub mod mmr; +mod payload; pub mod witness; -pub use commitment::{ - known_payload_ids, BeefyPayloadId, Commitment, Payload, SignedCommitment, - VersionedFinalityProof, -}; +pub use commitment::{Commitment, SignedCommitment, VersionedFinalityProof}; +pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider}; use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 471cb96841b8e..b479d979f13f3 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -17,8 +17,8 @@ //! BEEFY + MMR utilties. //! 
-//! While BEEFY can be used completely indepentently as an additional consensus gadget, -//! it is designed around a main use case of making bridging standalone networks together. +//! While BEEFY can be used completely independently as an additional consensus gadget, +//! it is designed around a main use case of bridging standalone networks together. //! For that use case it's common to use some aggregated data structure (like MMR) to be //! used in conjunction with BEEFY, to be able to efficiently prove any past blockchain data. //! @@ -26,9 +26,13 @@ //! but we imagine they will be useful for other chains that either want to bridge with Polkadot //! or are completely standalone, but heavily inspired by Polkadot. -use crate::Vec; +use crate::{crypto::AuthorityId, ConsensusLog, MmrRootHash, Vec, BEEFY_ENGINE_ID}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block, Header}, +}; /// A provider for extra data that gets added to the Mmr leaf pub trait BeefyDataProvider { @@ -121,9 +125,78 @@ pub struct BeefyAuthoritySet { /// Details of the next BEEFY authority set. pub type BeefyNextAuthoritySet = BeefyAuthoritySet; +/// Extract the MMR root hash from a digest in the given header, if it exists. +pub fn find_mmr_root_digest(header: &B::Header) -> Option { + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::MmrRoot(root) => Some(root), + _ => None, + }; + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) +} + +#[cfg(feature = "std")] +pub use mmr_root_provider::MmrRootProvider; +#[cfg(feature = "std")] +mod mmr_root_provider { + use super::*; + use crate::{known_payloads, payload::PayloadProvider, Payload}; + use sp_api::ProvideRuntimeApi; + use sp_mmr_primitives::MmrApi; + use sp_runtime::generic::BlockId; + use sp_std::{marker::PhantomData, sync::Arc}; + + /// A [`crate::Payload`] provider where payload is Merkle Mountain Range root hash. + /// + /// Encoded payload contains a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). + pub struct MmrRootProvider { + runtime: Arc, + _phantom: PhantomData, + } + + impl MmrRootProvider + where + B: Block, + R: ProvideRuntimeApi, + R::Api: MmrApi, + { + /// Create new BEEFY Payload provider with MMR Root as payload. + pub fn new(runtime: Arc) -> Self { + Self { runtime, _phantom: PhantomData } + } + + /// Simple wrapper that gets MMR root from header digests or from client state. 
+ fn mmr_root_from_digest_or_runtime(&self, header: &B::Header) -> Option { + find_mmr_root_digest::(header).or_else(|| { + self.runtime + .runtime_api() + .mmr_root(&BlockId::hash(header.hash())) + .ok() + .and_then(|r| r.ok()) + }) + } + } + + impl PayloadProvider for MmrRootProvider + where + B: Block, + R: ProvideRuntimeApi, + R::Api: MmrApi, + { + fn payload(&self, header: &B::Header) -> Option { + self.mmr_root_from_digest_or_runtime(header).map(|mmr_root| { + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, mmr_root.encode()) + }) + } + } +} + #[cfg(test)] mod tests { use super::*; + use crate::H256; + use sp_runtime::{traits::BlakeTwo256, Digest, DigestItem, OpaqueExtrinsic}; #[test] fn should_construct_version_correctly() { @@ -147,4 +220,30 @@ mod tests { fn should_panic_if_minor_too_large() { MmrLeafVersion::new(0, 32); } + + #[test] + fn extract_mmr_root_digest() { + type Header = sp_runtime::generic::Header; + type Block = sp_runtime::generic::Block; + let mut header = Header::new( + 1u64, + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); + + // verify empty digest shows nothing + assert!(find_mmr_root_digest::(&header).is_none()); + + let mmr_root_hash = H256::random(); + header.digest_mut().push(DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::::MmrRoot(mmr_root_hash).encode(), + )); + + // verify validator set is correctly extracted from digest + let extracted = find_mmr_root_digest::(&header); + assert_eq!(extracted, Some(mmr_root_hash)); + } } diff --git a/primitives/beefy/src/payload.rs b/primitives/beefy/src/payload.rs new file mode 100644 index 0000000000000..0f23c3f381f19 --- /dev/null +++ b/primitives/beefy/src/payload.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::traits::Block; +use sp_std::prelude::*; + +/// Id of different payloads in the [`crate::Commitment`] data. +pub type BeefyPayloadId = [u8; 2]; + +/// Registry of all known [`BeefyPayloadId`]. +pub mod known_payloads { + use crate::BeefyPayloadId; + + /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. + /// + /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). + pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; +} + +/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. +/// +/// The idea is to store a vector of SCALE-encoded values with an extra identifier. +/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected +/// value. Duplicated identifiers are disallowed. It's okay for different implementations to only +/// support a subset of possible values. 
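+///
+/// A usage sketch (illustrative only; `mmr_root` stands in for any value implementing
+/// `Encode`, and `*b"xy"` is a made-up identifier):
+/// ```ignore
+/// let payload = Payload::from_single_entry(known_payloads::MMR_ROOT_ID, mmr_root.encode())
+/// 	.push_raw(*b"xy", vec![1, 2, 3]);
+/// assert!(payload.get_raw(&known_payloads::MMR_ROOT_ID).is_some());
+/// assert!(payload.get_raw(b"xy").is_some());
+/// ```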
+#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)]
+pub struct Payload(Vec<(BeefyPayloadId, Vec<u8>)>);
+
+impl Payload {
+	/// Construct a new payload given an initial value
+	pub fn from_single_entry(id: BeefyPayloadId, value: Vec<u8>) -> Self {
+		Self(vec![(id, value)])
+	}
+
+	/// Returns a raw payload under given `id`.
+	///
+	/// If the [`BeefyPayloadId`] is not found in the payload `None` is returned.
+	pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec<u8>> {
+		let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?;
+		Some(&self.0[index].1)
+	}
+
+	/// Returns a decoded payload value under given `id`.
+	///
+	/// In case the value is not there, or it cannot be decoded, `None` is returned.
+	pub fn get_decoded<T: Decode>(&self, id: &BeefyPayloadId) -> Option<T> {
+		self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok())
+	}
+
+	/// Push a `Vec<u8>` with a given id into the payload vec.
+	/// This method will internally sort the payload vec after every push.
+	///
+	/// Returns self to allow for daisy chaining.
+	pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec<u8>) -> Self {
+		self.0.push((id, value));
+		self.0.sort_by_key(|(id, _)| *id);
+		self
+	}
+}
+
+/// Trait for custom BEEFY payload providers.
+pub trait PayloadProvider<B: Block> {
+	/// Provide BEEFY payload if available for `header`.
+	fn payload(&self, header: &B::Header) -> Option<Payload>;
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn payload_methods_work_as_expected() {
+		let id1: BeefyPayloadId = *b"hw";
+		let msg1: String = "1. Hello World!".to_string();
+		let id2: BeefyPayloadId = *b"yb";
+		let msg2: String = "2. Yellow Board!".to_string();
+		let id3: BeefyPayloadId = *b"cs";
+		let msg3: String = "3. Cello Cord!".to_string();
+
+		let payload = Payload::from_single_entry(id1, msg1.encode())
+			.push_raw(id2, msg2.encode())
+			.push_raw(id3, msg3.encode());
+
+		assert_eq!(payload.get_decoded(&id1), Some(msg1));
+		assert_eq!(payload.get_decoded(&id2), Some(msg2));
+		assert_eq!(payload.get_raw(&id3), Some(&msg3.encode()));
+		assert_eq!(payload.get_raw(&known_payloads::MMR_ROOT_ID), None);
+	}
+}
diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs
index cebfc3de85049..2c45e0ade90c4 100644
--- a/primitives/beefy/src/witness.rs
+++ b/primitives/beefy/src/witness.rs
@@ -81,7 +81,7 @@ mod tests {
 	use super::*;
 	use codec::Decode;
 
-	use crate::{crypto, known_payload_ids, Payload, KEY_TYPE};
+	use crate::{crypto, known_payloads, Payload, KEY_TYPE};
 
 	type TestCommitment = Commitment<u128>;
 	type TestSignedCommitment = SignedCommitment<u128, crypto::Signature>;
@@ -111,8 +111,10 @@ mod tests {
 	}
 
 	fn signed_commitment() -> TestSignedCommitment {
-		let payload =
-			Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".as_bytes().to_vec());
+		let payload = Payload::from_single_entry(
+			known_payloads::MMR_ROOT_ID,
+			"Hello World!".as_bytes().to_vec(),
+		);
 		let commitment: TestCommitment =
 			Commitment { payload, block_number: 5, validator_set_id: 0 };
 
From a84def97102166643bc3c807ae69892551c47536 Mon Sep 17 00:00:00 2001
From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com>
Date: Thu, 6 Oct 2022 14:12:51 +0200
Subject: [PATCH 48/75] Maximum value for `MultiplierUpdate` (#12282)

* Maximum value for MultiplierUpdate

* Update frame/transaction-payment/src/lib.rs

Co-authored-by: Stephen Shelton

* Update lib.rs

* return constant

* fix in runtime

* Update frame/transaction-payment/src/lib.rs

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>

* Update
frame/transaction-payment/src/lib.rs

Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>

* fixes

* remove unused import

* Update lib.rs

* more readable

* fix

* fix nits

Co-authored-by: Stephen Shelton
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com>
---
 bin/node/runtime/src/impls.rs        |  5 +++--
 bin/node/runtime/src/lib.rs          | 12 +++++++++---
 frame/transaction-payment/src/lib.rs | 26 ++++++++++++++++++++------
 3 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs
index 0f9ed6e275196..0a5c797ba729f 100644
--- a/bin/node/runtime/src/impls.rs
+++ b/bin/node/runtime/src/impls.rs
@@ -126,8 +126,8 @@ mod multiplier_tests {
 
 	use crate::{
 		constants::{currency::*, time::*},
-		AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights,
-		System, TargetBlockFullness, TransactionPayment,
+		AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime,
+		RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment,
 	};
 	use frame_support::{
 		dispatch::DispatchClass,
@@ -156,6 +156,7 @@ mod multiplier_tests {
 			TargetBlockFullness,
 			AdjustmentVariable,
 			MinimumMultiplier,
+			MaximumMultiplier,
 		>::convert(fm)
 	}
 
diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 4a35b972ff7de..34f6988c31643 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -65,7 +65,7 @@ use sp_runtime::{
 	curve::PiecewiseLinear,
 	generic, impl_opaque_keys,
 	traits::{
-		self, BlakeTwo256, Block as BlockT, ConvertInto, NumberFor, OpaqueKeys,
+		self, BlakeTwo256, Block as BlockT, Bounded, ConvertInto, NumberFor, OpaqueKeys,
 		SaturatedConversion, StaticLookup,
 	},
 	transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
@@ -443,6 +443,7 @@ parameter_types! {
 	pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25);
 	pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(1, 100_000);
 	pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128);
+	pub MaximumMultiplier: Multiplier = Bounded::max_value();
 }
 
 impl pallet_transaction_payment::Config for Runtime {
@@ -451,8 +452,13 @@ impl pallet_transaction_payment::Config for Runtime {
 	type OperationalFeeMultiplier = OperationalFeeMultiplier;
 	type WeightToFee = IdentityFee<Balance>;
 	type LengthToFee = ConstantMultiplier<Balance, TransactionByteFee>;
-	type FeeMultiplierUpdate =
-		TargetedFeeAdjustment<Self, TargetBlockFullness, AdjustmentVariable, MinimumMultiplier>;
+	type FeeMultiplierUpdate = TargetedFeeAdjustment<
+		Self,
+		TargetBlockFullness,
+		AdjustmentVariable,
+		MinimumMultiplier,
+		MaximumMultiplier,
+	>;
 }
 
 impl pallet_asset_tx_payment::Config for Runtime {
diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs
index 80297d1a0d362..ce85bf93a90a7 100644
--- a/frame/transaction-payment/src/lib.rs
+++ b/frame/transaction-payment/src/lib.rs
@@ -127,12 +127,14 @@ type BalanceOf<T> = <<T as Config>::OnChargeTransaction as OnChargeTransaction<
 
-pub struct TargetedFeeAdjustment<T, S, V, M>(sp_std::marker::PhantomData<(T, S, V, M)>);
+pub struct TargetedFeeAdjustment<T, S, V, M, X>(sp_std::marker::PhantomData<(T, S, V, M, X)>);
 
 /// Something that can convert the current multiplier to the next one.
 pub trait MultiplierUpdate: Convert<Multiplier, Multiplier> {
-	/// Minimum multiplier
+	/// Minimum multiplier. Any outcome of the `convert` function should be at least this.
 	fn min() -> Multiplier;
+	/// Maximum multiplier. Any outcome of the `convert` function should be at most this.
+	fn max() -> Multiplier;
 	/// Target block saturation level
 	fn target() -> Perquintill;
 	/// Variability factor
@@ -143,6 +145,9 @@ impl MultiplierUpdate for () {
 	fn min() -> Multiplier {
 		Default::default()
 	}
+	fn max() -> Multiplier {
+		<Multiplier as sp_runtime::traits::Bounded>::max_value()
+	}
 	fn target() -> Perquintill {
 		Default::default()
 	}
@@ -151,16 +156,20 @@ impl MultiplierUpdate for () {
 	}
 }
 
-impl<T, S, V, M> MultiplierUpdate for TargetedFeeAdjustment<T, S, V, M>
+impl<T, S, V, M, X> MultiplierUpdate for TargetedFeeAdjustment<T, S, V, M, X>
 where
 	T: frame_system::Config,
 	S: Get<Perquintill>,
 	V: Get<Multiplier>,
 	M: Get<Multiplier>,
+	X: Get<Multiplier>,
 {
 	fn min() -> Multiplier {
 		M::get()
 	}
+	fn max() -> Multiplier {
+		X::get()
+	}
 	fn target() -> Perquintill {
 		S::get()
 	}
@@ -169,18 +178,20 @@ where
 	}
 }
 
-impl<T, S, V, M> Convert<Multiplier, Multiplier> for TargetedFeeAdjustment<T, S, V, M>
+impl<T, S, V, M, X> Convert<Multiplier, Multiplier> for TargetedFeeAdjustment<T, S, V, M, X>
 where
 	T: frame_system::Config,
 	S: Get<Perquintill>,
 	V: Get<Multiplier>,
 	M: Get<Multiplier>,
+	X: Get<Multiplier>,
 {
 	fn convert(previous: Multiplier) -> Multiplier {
 		// Defensive only. The multiplier in storage should always be at most positive. Nonetheless
 		// we recover here in case of errors, because any value below this would be stale and can
 		// never change.
 		let min_multiplier = M::get();
+		let max_multiplier = X::get();
 		let previous = previous.max(min_multiplier);
 
 		let weights = T::BlockWeights::get();
@@ -217,11 +228,11 @@ where
 
 		if positive {
 			let excess = first_term.saturating_add(second_term).saturating_mul(previous);
-			previous.saturating_add(excess).max(min_multiplier)
+			previous.saturating_add(excess).max(min_multiplier).min(max_multiplier)
 		} else {
 			// Defensive-only: first_term > second_term. Safe subtraction.
 			let negative = first_term.saturating_sub(second_term).saturating_mul(previous);
-			previous.saturating_sub(negative).max(min_multiplier)
+			previous.saturating_sub(negative).max(min_multiplier).min(max_multiplier)
 		}
 	}
 }
@@ -233,6 +244,9 @@ impl<M: Get<Multiplier>> MultiplierUpdate for ConstFeeMultiplier<M> {
 	fn min() -> Multiplier {
 		M::get()
 	}
+	fn max() -> Multiplier {
+		M::get()
+	}
 	fn target() -> Perquintill {
 		Default::default()
 	}

From f77f788c8cd8692f716a54f13c434a8774246d7a Mon Sep 17 00:00:00 2001
From: Sam Johnson
Date: Thu, 6 Oct 2022 16:31:56 -0400
Subject: [PATCH 49/75] macro stubs for all pallet:: macros to improve documentation visibility and discovery + revamp of pallet macro documentation (#12334)

* proof of concept working for pallet::whitelist_storage

* fix comments

* pallet macros docs rewrite WIP

* fix issue with cargo fmt cobbling links

* tweak capitalization

* fix docs for storage_version

* fix docs for pallet::hooks

* fix several comments

* fix invalid link

* fix wrapping and add missing links for pallet::hooks docs

* run rewrap on all text blocks in frame_support::pallet docs

* cargo fmt

* fix up pallet::call_index docs

* fix docs for pallet::extra_constants

* fix docs for pallet::error

* fix docs for pallet::event

* fix docs for pallet::event

* * fix docs for pallet::storage

* fix docs for pallet::getter

* fix docs for pallet::storage_prefix

* fix docs for pallet::unbounded

* fix docs for pallet::whitelist_storage

* fix docs for #[cfg(..)] (for storage items and attributes)

* fix docs for pallet::storage macro expansion

* fix docs for pallet::type_value

* fix docs for pallet::genesis_config

* fix docs for pallet::genesis_build

* fix docs for pallet::inherent

* fix docs for pallet::validate_unsigned

* fix docs for pallet::origin

* fix docs for general notes on instantiable pallets

* fix docs for example of a non-instantiable pallet

* fix docs for example of an instantiable pallet

* fix docs for upgrade guidelines

* fix docs for upgrade guidelines

* fix docs for upgrade checking and
final notes * fix some examples near the beginning * extract docs for `pallet::whitelist_storage` * add docs for pallet_macro_stub * fix order of pallet::config and pallet::constant * set up stub for pallet::config * set up stub for pallet::constant * fix * set up stub for pallet::disable_frame_system_supertrait_check * set up stub for pallet::generate_storage_info * set up stub for pallet::storage_version * set up stub for pallet::hooks * set up stub for pallet::weight * set up stub for pallet::compact * set up stub for pallet::call_index * set up stub for pallet::extra_constants * set up stub for pallet::error * set up stub for pallet::event * set up stub for pallet::generate_deposit * set up stub for pallet::storage * set up stub for pallet::getter * set up stub for pallet::storage_prefix * set up stub for pallet::unbounded * set up stub for pallet::type_value * set up stub for pallet::genesis_config * set up stub for pallet::genesis_build * set up stub for pallet::inherent * set up stub for pallet::validate_unsigned * set up stub for pallet::origin * fix comment * cargo fmt * tweak error message * Update frame/support/procedural/src/lib.rs Co-authored-by: Keith Yeung * Update frame/support/procedural/src/lib.rs Co-authored-by: Keith Yeung * switch order of derives Co-authored-by: Squirrel * tweak wording Co-authored-by: Squirrel * add more context info about `MAX_MODULE_ERROR_ENCODED_SIZE` Co-authored-by: Squirrel * tweak wording about where clause Co-authored-by: Squirrel * clarify wording about system/events key Co-authored-by: Squirrel * use "The Event enum" instead of "item" Co-authored-by: Squirrel * fix bad wording Co-authored-by: Squirrel * use enum instead of type Co-authored-by: Squirrel * expect => expects Co-authored-by: Squirrel * add additional note about storage prefix Co-authored-by: Squirrel * clearer note about GenesisConfig Co-authored-by: Squirrel * Use "The impl" instead of "The item" Co-authored-by: Squirrel * add note and link to tight-coupling docs Co-authored-by: Squirrel * cargo fmt * remove spaces around parenthesis * fix missing text for pallet::config * fix issue with pallet::constant intro * fix wording about codec index * fix pallet::error wording * fix comment about 1 byte => 256 errors * fix where clause comment * fix comment about where pallet events are stored * rewrap some text * fix pallet::storage docs * fix pallet::storage_prefix docs * tweak docs for pallet::genesis_build * tweak docs for pallet::config * specify that pallet::event must be present if pallet::config is present * add note about why we would want to bypass the supertrait check * mention that pallet::generate_store attribute is only valid on pallet struct * add note about adding new calls to the end to maintain existing order * add note about pallet::type_value and pallet::storage Co-authored-by: Squirrel * add note about using pallet::type_value alongside pallet::storage * include warning about modifying disaptchables on other pallet::call_index docs page * fix incorrect comment * add much more information for pallet::inherent * move pallet::pallet macro expansion notes back to their rightful place * re-run CI * fix macro expansion appearing in wrong place for pallet::pallet * replicate pallet::pallet docs on the pallet::pallet macro stub * force CI re-run Co-authored-by: Keith Yeung Co-authored-by: Squirrel Co-authored-by: parity-processbot <> --- frame/support/procedural/src/lib.rs | 680 +++++++++++++++++++- frame/support/src/lib.rs | 964 ++++++++++++++++------------ 2 files 
changed, 1223 insertions(+), 421 deletions(-) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 0f72b28748cee..ccff5488c93be 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -36,6 +36,7 @@ mod transactional; mod tt_macro; use proc_macro::TokenStream; +use quote::quote; use std::{cell::RefCell, str::FromStr}; pub(crate) use storage::INHERENT_INSTANCE_NAME; @@ -402,7 +403,49 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// Macro to define a pallet. Docs are at `frame_support::pallet`. +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify +/// pallet information. +/// +/// The struct must be defined as follows: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// +/// ## Macro expansion: +/// +/// The macro adds this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: +/// * `GetStorageVersion` +/// * `OnGenesis`: contains some logic to write the pallet version into storage. +/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// +/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// +/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by +/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type +/// `frame_system::Config::PalletInfo`). +/// +/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. +/// +/// If the attribute `generate_store` is set then the macro creates the trait `Store` and +/// implements it on `Pallet`. +/// +/// If the attribute `set_storage_max_encoded_len` is set then the macro calls +/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the +/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the +/// `PartialStorageInfoTrait` implementation of storages. +/// +/// See `frame_support::pallet` docs for more info. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -583,3 +626,638 @@ pub fn storage_alias(_: TokenStream, input: TokenStream) -> TokenStream { .unwrap_or_else(|r| r.into_compile_error()) .into() } + +/// Used internally to decorate pallet attribute macro stubs when they are erroneously used +/// outside of a pallet module +fn pallet_macro_stub() -> TokenStream { + quote!(compile_error!( + "This attribute can only be used from within a pallet module marked with `#[frame_support::pallet]`" + )) + .into() +} + +/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits +/// $optional_where_clause +/// { +/// ... +/// } +/// ``` +/// +/// I.e. a regular trait definition named `Config`, with the supertrait +/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. 
+/// (Specifying other supertraits here is known as [tight +/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) +/// +/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds +/// `From` and `IsType<::RuntimeEvent>`. +/// +/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item +/// in your `#[pallet::config]`. +#[proc_macro_attribute] +pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by `Get` +/// from [`pallet::config`](`macro@config`) into metadata, e.g.: +/// +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type Foo: Get; +/// } +/// ``` +#[proc_macro_attribute] +pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To bypass the `frame_system::Config` supertrait check, use the attribute +/// `pallet::disable_frame_system_supertrait_check`, e.g.: +/// +/// ```ignore +/// #[pallet::config] +/// #[pallet::disable_frame_system_supertrait_check] +/// pub trait Config: pallet_timestamp::Config {} +/// ``` +/// +/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you +/// want to write an alternative to the `frame_system` pallet. +#[proc_macro_attribute] +pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with +/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: +/// +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// ``` +/// More precisely, the `Store` trait contains an associated type for each storage. It is +/// implemented for `Pallet` allowing access to the storage from pallet struct. +/// +/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using +/// `::Foo`. +/// +/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct +/// definition. +#[proc_macro_attribute] +pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To generate the full storage info (used for PoV calculation) use the attribute +/// `#[pallet::generate_storage_info]`, e.g.: +/// +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_storage_info] +/// pub struct Pallet(_); +/// ``` +/// +/// This requires all storage items to implement the trait `StorageInfoTrait`, thus all keys +/// and value types must be bound by `MaxEncodedLen`. Individual storages can opt-out from this +/// constraint by using [`#[pallet::unbounded]`](`macro@unbounded`) (see +/// [`#[pallet::storage]`](`macro@storage`) for more info). +#[proc_macro_attribute] +pub fn generate_storage_info(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage +/// version needs to be communicated to the macro. 
This can be done by using the +/// `pallet::storage_version` attribute: +/// +/// ```ignore +/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); +/// +/// #[pallet::pallet] +/// #[pallet::storage_version(STORAGE_VERSION)] +/// pub struct Pallet(_); +/// ``` +/// +/// If not present, the current storage version is set to the default value. +#[proc_macro_attribute] +pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for +/// `Pallet` that specifies pallet-specific logic. +/// +/// The item the attribute attaches to must be defined as follows: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet $optional_where_clause { +/// ... +/// } +/// ``` +/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait +/// `Hooks>` (they are defined in preludes), for the type `Pallet` and +/// with an optional where clause. +/// +/// If no `#[pallet::hooks]` exists, then the following default implementation is +/// automatically generated: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet {} +/// ``` +/// +/// ## Macro expansion +/// +/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, +/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. +/// +/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some +/// additional logic. E.g. logic to write the pallet version into storage. +/// +/// NOTE: The macro also adds some tracing logic when implementing the above traits. The +/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +#[proc_macro_attribute] +pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the +/// first argument must be `origin: OriginFor`. +#[proc_macro_attribute] +pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must +/// return a `DispatchResultWithPostInfo` or `DispatchResult`. +#[proc_macro_attribute] +pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, +/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. +/// +/// All call indexes start from 0, until it encounters a dispatchable function with a defined +/// call index. The dispatchable function that lexically follows the function with a defined +/// call index will have that call index, but incremented by 1, e.g. if there are 3 +/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` +/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. +/// +/// All arguments must implement [`Debug`], [`PartialEq`], [`Eq`], `Decode`, `Encode`, and +/// [`Clone`]. For ease of use, bound by the trait `frame_support::pallet_prelude::Member`. 
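+///
+/// For illustration only (a hypothetical pallet; the names and weights below are made up and
+/// not part of the macro itself), explicit call indices can be pinned like so:
+///
+/// ```ignore
+/// #[pallet::call]
+/// impl<T: Config> Pallet<T> {
+///   // Pinned to 0: re-ordering or removing other calls will not change this encoding.
+///   #[pallet::weight(10_000)]
+///   #[pallet::call_index(0)]
+///   pub fn set_value(origin: OriginFor<T>, value: u32) -> DispatchResult {
+///     ensure_signed(origin)?;
+///     // ... pallet logic ...
+///     Ok(())
+///   }
+///
+///   // Calls added later take the next free index, keeping existing encodings stable.
+///   #[pallet::weight(10_000)]
+///   #[pallet::call_index(1)]
+///   pub fn clear_value(origin: OriginFor<T>) -> DispatchResult {
+///     ensure_signed(origin)?;
+///     Ok(())
+///   }
+/// }
+/// ```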
+///
+/// If no `#[pallet::call]` exists, then a default implementation corresponding to the
+/// following code is automatically generated:
+///
+/// ```ignore
+/// #[pallet::call]
+/// impl<T: Config> Pallet<T> {}
+/// ```
+///
+/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be
+/// done with care. Indeed this will change the outer runtime call type (which is an enum with
+/// one variant per pallet); this outer runtime call can be stored on-chain (e.g. in
+/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the
+/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so
+/// that the `Call` enum encoding does not change after modification. As a general rule of
+/// thumb, it is therefore advantageous to always add new calls to the end so you can maintain
+/// the existing order of calls.
+///
+/// ### Macro expansion
+///
+/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements:
+/// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`),
+/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, and `UnfilteredDispatchable`.
+///
+/// The macro implements the `Callable` trait on `Pallet` and a function `call_functions`
+/// which returns the dispatchable metadata.
+#[proc_macro_attribute]
+pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// Allows you to define some extra constants to be added into constant metadata.
+///
+/// Item must be defined as:
+///
+/// ```ignore
+/// #[pallet::extra_constants]
+/// impl<T: Config> Pallet<T> where $optional_where_clause {
+///   /// $some_doc
+///   $vis fn $fn_name() -> $some_return_type {
+///     ...
+///   }
+///   ...
+/// }
+/// ```
+/// I.e. a regular Rust `impl` block with some optional where clause and functions with 0 args,
+/// 0 generics, and some return type.
+///
+/// ## Macro expansion
+///
+/// The macro adds some extra constants to pallet constant metadata.
+#[proc_macro_attribute]
+pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned
+/// from the dispatchable when an error occurs. The information for this error type is then
+/// stored in metadata.
+///
+/// Item must be defined as:
+///
+/// ```ignore
+/// #[pallet::error]
+/// pub enum Error<T> {
+///   /// $some_optional_doc
+///   $SomeFieldLessVariant,
+///   /// $some_more_optional_doc
+///   $SomeVariantWithOneField(FieldType),
+///   ...
+/// }
+/// ```
+/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field
+/// variants.
+///
+/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used
+/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in
+/// size in order to reduce storage size. The error enum itself has an absolute maximum encoded
+/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`.
+///
+/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to
+/// diagnose problems and give a better experience to the user. Don't skimp on having lots of
+/// individual error conditions.)
+///
+/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will
+/// fail to compile. Rust primitive types have already implemented the `PalletError` trait
+/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence
+/// in most use cases, a manual implementation is not necessary and is discouraged.
+///
+/// The generic `T` must not bound anything and a `where` clause is not allowed. That said,
+/// bounds and/or a where clause should not be needed for any use-case.
+///
+/// ## Macro expansion
+///
+/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and
+/// `as_str` using variant doc.
+///
+/// The macro also implements `From<Error<T>>` for `&'static str` and `From<Error<T>>` for
+/// `DispatchError`.
+#[proc_macro_attribute]
+pub fn error(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are
+/// stored under the `system` / `events` key when the block is applied (and then replaced when
+/// the next block writes its events).
+///
+/// The `Event` enum must be defined as follows:
+///
+/// ```ignore
+/// #[pallet::event]
+/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional
+/// pub enum Event<$some_generic> $optional_where_clause {
+///   /// Some doc
+///   $SomeName($SomeType, $YetanotherType, ...),
+///   ...
+/// }
+/// ```
+///
+/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or
+/// `T` or `T: Config`, and optional where clause.
+///
+/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and
+/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in
+/// `frame_support::pallet_prelude`.
+#[proc_macro_attribute]
+pub fn event(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a
+/// helper function on `Pallet` that handles deposit events.
+///
+/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`.
+///
+/// ## Macro expansion
+///
+/// The macro will add the following attributes to the `Event` enum:
+/// * `#[derive(frame_support::CloneNoBound)]`
+/// * `#[derive(frame_support::EqNoBound)]`
+/// * `#[derive(frame_support::PartialEqNoBound)]`
+/// * `#[derive(frame_support::RuntimeDebugNoBound)]`
+/// * `#[derive(codec::Encode)]`
+/// * `#[derive(codec::Decode)]`
+///
+/// The macro implements `From<Event<T>>` for `()`.
+///
+/// The macro implements a metadata function on `Event` returning the `EventMetadata`.
+///
+/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on
+/// `Pallet`.
+#[proc_macro_attribute]
+pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime
+/// storage and also set its metadata. This attribute can be used multiple times.
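+///
+/// As a minimal sketch (a hypothetical storage item, purely for illustration):
+///
+/// ```ignore
+/// #[pallet::storage]
+/// pub type Something<T> = StorageValue<_, u32, ValueQuery>;
+/// ```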
+///
+/// Item should be defined as:
+///
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::getter(fn $getter_name)] // optional
+/// $vis type $StorageName<$some_generic> $optional_where_clause
+///   = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>;
+/// ```
+///
+/// or with unnamed generic:
+///
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::getter(fn $getter_name)] // optional
+/// $vis type $StorageName<$some_generic> $optional_where_clause
+///   = $StorageType<_, $some_generics, ...>;
+/// ```
+///
+/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be
+/// one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. The generic arguments of the
+/// storage type can be given in two manners: named and unnamed. For named generic arguments,
+/// the name for each argument should match the name defined for it on the storage struct:
+/// * `StorageValue` expects `Value` and optionally `QueryKind` and `OnEmpty`,
+/// * `StorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`,
+/// * `CountedStorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`,
+/// * `StorageDoubleMap` expects `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` and optionally
+/// `QueryKind` and `OnEmpty`.
+///
+/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the
+/// macro, and other generics must be declared as normal generic type declarations.
+///
+/// The `Prefix` generic written by the macro is generated using
+/// `PalletInfo::name::<Pallet<T>>()` and the name of the storage type. E.g. if the runtime names
+/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix:
+/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`.
+///
+/// For the `CountedStorageMap` variant, the `Prefix` also implements
+/// `CountedStorageMapInstance`. It also associates a `CounterPrefix`, which is implemented the
+/// same as above, but the storage prefix is prepended with `"CounterFor"`. E.g. if the runtime names
+/// the pallet "MyExample" then the storage `type Foo = CountedStorageMap<...>` will store
+/// its counter at the prefix: `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`.
+///
+/// E.g:
+///
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::storage_prefix = "OtherName"]
+/// pub(super) type MyStorage<T> = StorageMap<Hasher = Blake2_128Concat, Key = u32, Value = u32>;
+/// ```
+///
+/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++
+/// Twox128(b"OtherName")`.
+#[proc_macro_attribute]
+pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a
+/// getter function on `Pallet`.
+///
+/// Also see [`pallet::storage`](`macro@storage`)
+#[proc_macro_attribute]
+pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the
+/// storage prefix to use. This is helpful if you wish to rename the storage field but don't
+/// want to perform a migration.
+///
+/// E.g:
+///
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::storage_prefix = "foo"]
+/// #[pallet::getter(fn my_storage)]
+/// pub(super) type MyStorage<T> = StorageMap<Hasher = Blake2_128Concat, Key = u32, Value = u32>;
+/// ```
+///
+/// or
+///
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::getter(fn my_storage)]
+/// pub(super) type MyStorage<T> = StorageMap<_, Blake2_128Concat, u32, u32>;
+/// ```
+#[proc_macro_attribute]
+pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When
+/// implementing the storage info (when `#[pallet::generate_storage_info]` is specified on
+/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This
+/// can be useful for storage which can never go into PoV (Proof of Validity).
+#[proc_macro_attribute]
+pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The optional attribute `#[pallet::whitelist_storage]` will declare the
+/// storage as whitelisted from benchmarking. Doing so will exclude reads of
+/// that value's storage key from counting towards weight calculations during
+/// benchmarking.
+///
+/// This attribute should only be attached to storages that are known to be
+/// read/used in every block. This will result in a more accurate benchmarking weight.
+///
+/// ### Example
+/// ```ignore
+/// #[pallet::storage]
+/// #[pallet::whitelist_storage]
+/// pub(super) type Number<T: Config> = StorageValue<_, T::BlockNumber, ValueQuery>;
+/// ```
+///
+/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as
+/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for
+/// it to work properly.
+#[proc_macro_attribute]
+pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait
+/// to ease the use of storage types. This attribute is meant to be used alongside
+/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute
+/// can be used multiple times.
+///
+/// Item must be defined as:
+///
+/// ```ignore
+/// #[pallet::type_value]
+/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr }
+/// ```
+///
+/// I.e.: a function definition with generics none or `T: Config` and a return type.
+///
+/// E.g.:
+///
+/// ```ignore
+/// #[pallet::type_value]
+/// fn MyDefault<T: Config>() -> T::Balance { 3.into() }
+/// ```
+///
+/// ## Macro expansion
+///
+/// The macro renames the function to some internal name, generates a struct with the original
+/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user
+/// defined function.
+#[proc_macro_attribute]
+pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream {
+	pallet_macro_stub()
+}
+
+/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration
+/// for the pallet.
+///
+/// Item is defined as either an enum or a struct. It needs to be public and implement the
+/// trait `GenesisBuild` with [`#[pallet::genesis_build]`](`macro@genesis_build`). The type
+/// generics are constrained to be either none, or `T` or `T: Config`.
+/// +/// E.g: +/// +/// ```ignore +/// #[pallet::genesis_config] +/// pub struct GenesisConfig { +/// _myfield: BalanceOf, +/// } +/// ``` +#[proc_macro_attribute] +pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` +/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's +/// initial state. +/// +/// The impl must be defined as: +/// +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig<$maybe_generics> { +/// fn build(&self) { $expr } +/// } +/// ``` +/// +/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on +/// type `GenesisConfig` with generics none or `T`. +/// +/// E.g.: +/// +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// ``` +/// +/// ## Macro expansion +/// +/// The macro will add the following attribute: +/// * `#[cfg(feature = "std")]` +/// +/// The macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as a second +/// generic for non-instantiable pallets. +#[proc_macro_attribute] +pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::inherent]` attribute allows the pallet to provide some +/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). +/// An inherent is some piece of data that is inserted by a block authoring node at block +/// creation time and can either be accepted or rejected by validators based on whether the +/// data falls within an acceptable range. +/// +/// The most common inherent is the `timestamp` that is inserted into every block. Since there +/// is no way to validate timestamps, validators simply check that the timestamp reported by +/// the block authoring node falls within an acceptable range. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// +/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type +/// `Pallet`, and some optional where clause. +/// +/// ## Macro expansion +/// +/// The macro currently makes no use of this information, but it might use this information in +/// the future to give information directly to `construct_runtime`. +#[proc_macro_attribute] +pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned +/// transaction: +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// +/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type +/// `Pallet`, and some optional where clause. +/// +/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add +/// some specific logic for transaction validation. +/// +/// ## Macro expansion +/// +/// The macro currently makes no use of this information, but it might use this information in +/// the future to give information directly to `construct_runtime`. 
+#[proc_macro_attribute] +pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. +/// +/// Item must be either a type alias, an enum, or a struct. It needs to be public. +/// +/// E.g.: +/// +/// ```ignore +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T)>); +/// ``` +/// +/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin +/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care +/// as it might require some migration. +/// +/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +#[proc_macro_attribute] +pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 51aa05261dac3..9b0ee84c34d8a 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -744,7 +744,7 @@ macro_rules! assert_err { /// Assert an expression returns an error specified. /// -/// This can be used on`DispatchResultWithPostInfo` when the post info should +/// This can be used on `DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] macro_rules! assert_err_ignore_postinfo { @@ -1399,6 +1399,7 @@ pub mod pallet_prelude { PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; pub use codec::{Decode, Encode, MaxEncodedLen}; + pub use frame_support::pallet_macros::*; pub use scale_info::TypeInfo; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, @@ -1413,38 +1414,133 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } -/// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. +/// The `pallet` attribute macro defines a pallet that can be used with +/// [`construct_runtime!`]. It must be attached to a module named `pallet` as follows: /// -/// It is define by a module item: /// ```ignore /// #[pallet] /// pub mod pallet { -/// ... +/// ... /// } /// ``` /// -/// Inside the module the macro will parse item with the attribute: `#[pallet::*]`, some -/// attributes are mandatory, some other optional. -/// -/// The attribute are explained with the syntax of non instantiable pallets, to see how pallet -/// with instance work see below example. +/// Note that various types can be automatically imported using +/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: /// -/// Note various type can be automatically imported using pallet_prelude in frame_support and -/// frame_system: /// ```ignore /// #[pallet] /// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... /// } /// ``` /// -/// # Config trait: `#[pallet::config]` mandatory +/// # pallet::* Attributes +/// +/// The `pallet` macro will parse any items within your `pallet` module that are annotated with +/// `#[pallet::*]` attributes. Some of these attributes are mandatory and some are optional, +/// and they can attach to different types of items within your pallet depending on the +/// attribute in question. 
The full list of `#[pallet::*]` attributes is shown below in the +/// order in which they are mentioned in this document: +/// +/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) +/// * [`pallet::config`](#config-trait-palletconfig-mandatory) +/// * [`pallet::constant`](#palletconstant) +/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) +/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) +/// * [`pallet::generate_storage_info`](#palletgenerate_storage_info) +/// * [`pallet::storage_version`](#palletstorage_version) +/// * [`pallet::hooks`](#hooks-pallethooks-optional) +/// * [`pallet::call`](#call-palletcall-optional) +/// * [`pallet::weight($expr)`](#palletweightexpr) +/// * [`pallet::compact`](#palletcompact-some_arg-some_type) +/// * [`pallet::call_index($idx)`](#palletcall_indexidx) +/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) +/// * [`pallet::error`](#error-palleterror-optional) +/// * [`pallet::event`](#event-palletevent-optional) +/// * [`pallet::generate_deposit($visibility fn +/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) +/// * [`pallet::storage`](#storage-palletstorage-optional) +/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) +/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) +/// * [`pallet::unbounded`](#palletunbounded-optional) +/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) +/// * [`cfg(..)`](#cfg-for-storage) (on storage items) +/// * [`pallet::type_value`](#type-value-pallettype_value-optional) +/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) +/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) +/// * [`pallet::inherent`](#inherent-palletinherent-optional) +/// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) +/// * [`pallet::origin`](#origin-palletorigin-optional) +/// +/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these +/// attributes, ultimately removing their AST nodes before they can be parsed as real +/// attribute macro calls. This means that technically we do not need attribute macro +/// definitions for any of these attributes, however, for consistency and discoverability +/// reasons, we still maintain stub attribute macro definitions for all of these attributes in +/// the [`pallet_macros`] module which is automatically included in all pallets as part of the +/// pallet prelude. The actual "work" for all of these attribute macros can be found in the +/// macro expansion for `#[pallet]`. +/// +/// Also note that in this document, pallet attributes are explained using the syntax of +/// non-instantiable pallets. For an example of an instantiable pallet, see [this +/// example](#example-of-an-instantiable-pallet). +/// +/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) +/// +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify +/// pallet information. +/// +/// The struct must be defined as follows: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. /// -/// The trait defining generics of the pallet. 
+/// ## Macro expansion:
+///
+/// The macro adds this attribute to the struct definition:
+/// ```ignore
+/// #[derive(
+/// 	frame_support::CloneNoBound,
+/// 	frame_support::EqNoBound,
+/// 	frame_support::PartialEqNoBound,
+/// 	frame_support::RuntimeDebugNoBound,
+/// )]
+/// ```
+/// and replaces the type `_` with `PhantomData<T>`. It also implements on the pallet:
+/// * [`GetStorageVersion`](`traits::GetStorageVersion`)
+/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into
+/// storage.
+/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined.
+///
+/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`.
+///
+/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess`) on `Pallet` to ease access
+/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation
+/// uses the associated type `frame_system::Config::PalletInfo`).
+///
+/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which gives
+/// information about all storages.
+///
+/// If the attribute `generate_store` is set then the macro creates the trait `Store` and
+/// implements it on `Pallet`.
+///
+/// If the attribute `set_storage_max_encoded_len` is set then the macro calls
+/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of
+/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. Otherwise it implements
+/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the
+/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages.
+///
+/// # Config trait: `#[pallet::config]` (mandatory)
+///
+/// The mandatory attribute `#[pallet::config]` defines the configurable options for the
+/// pallet.
+///
+/// Item must be defined as:
 ///
-/// Item must be defined as
 /// ```ignore
 /// #[pallet::config]
 /// pub trait Config: frame_system::Config + $optionally_some_other_supertraits
@@ -1453,74 +1549,103 @@
 /// ...
 /// }
 /// ```
-/// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`,
-/// optionally other supertrait and where clause.
 ///
-/// The associated type `RuntimeEvent` is reserved, if defined it must bounds
-/// `From<Event>` and `IsType<<Self as frame_system::Config>::RuntimeEvent>`, see
-/// `#[pallet::event]` for more information.
+/// I.e. a regular trait definition named `Config`, with the supertrait
+/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause.
+/// (Specifying other supertraits here is known as [tight
+/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/))
+///
+/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds
+/// `From<Event>` and `IsType<<Self as frame_system::Config>::RuntimeEvent>`.
+///
+/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent`
+/// exists as a config item in your `#[pallet::config]`.
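+///
+/// As an illustrative sketch (a hypothetical pallet; the `MaxItems` constant is made up), a
+/// minimal config trait could look like:
+///
+/// ```ignore
+/// #[pallet::config]
+/// pub trait Config: frame_system::Config {
+///   /// The overarching event type.
+///   type RuntimeEvent: From<Event<Self>>
+///     + IsType<<Self as frame_system::Config>::RuntimeEvent>;
+///
+///   /// A constant exposed in the pallet metadata.
+///   #[pallet::constant]
+///   type MaxItems: Get<u32>;
+/// }
+/// ```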
+/// +/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) +/// +/// ## `pallet::constant` +/// +/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by +/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: /// -/// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; +/// #[pallet::constant] +/// type Foo: Get; /// } /// ``` /// +/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) +/// +/// ## `pallet::disable_frame_system_supertrait_check` +/// +/// /// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `#[pallet::disable_frame_system_supertrait_check]`, e.g.: +/// `pallet::disable_frame_system_supertrait_check`, e.g.: +/// /// ```ignore /// #[pallet::config] /// #[pallet::disable_frame_system_supertrait_check] /// pub trait Config: pallet_timestamp::Config {} /// ``` /// -/// ### Macro expansion: +/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you +/// want to write an alternative to the `frame_system` pallet. /// -/// The macro expand pallet constant metadata with the information given by -/// `#[pallet::constant]`. +/// Also see +/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) /// -/// # Pallet struct placeholder: `#[pallet::pallet]` mandatory +/// ## Macro expansion: /// -/// The placeholder struct, on which is implemented pallet informations. +/// The macro expands pallet constant metadata with the information given by +/// `#[pallet::constant]`. /// -/// Item must be defined as followed: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// # `pallet::generate_store($vis trait Store)` +/// +/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with +/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: /// -/// To generate a `Store` trait associating all storages, use the attribute -/// `#[pallet::generate_store($vis trait Store)]`, e.g.: /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_store(pub(super) trait Store)] /// pub struct Pallet(_); /// ``` -/// More precisely the store trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing to access the storage from pallet struct. +/// More precisely, the `Store` trait contains an associated type for each storage. It is +/// implemented for `Pallet` allowing access to the storage from pallet struct. /// /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using /// `::Foo`. /// +/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct +/// definition. +/// +/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). 
+/// +/// # `pallet::generate_storage_info` +/// /// To generate the full storage info (used for PoV calculation) use the attribute /// `#[pallet::generate_storage_info]`, e.g.: +/// /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_storage_info] /// pub struct Pallet(_); /// ``` /// -/// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys -/// and value types must bound [`pallet_prelude::MaxEncodedLen`]. -/// Some individual storage can opt-out from this constraint by using `#[pallet::unbounded]`, -/// see `#[pallet::storage]` documentation. +/// This requires all storage items to implement the trait [`traits::StorageInfoTrait`], thus +/// all keys and value types must be bound by [`pallet_prelude::MaxEncodedLen`]. Individual +/// storages can opt-out from this constraint by using `#[pallet::unbounded]` (see +/// `#[pallet::storage]` for more info). /// -/// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to -/// be communicated to the macro. This can be done by using the `storage_version` attribute: +/// Also see [`pallet::generate_storage_info`](`frame_support::pallet_macros::generate_storage_info`) +/// +/// # `pallet::storage_version` +/// +/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro +/// implements [`traits::GetStorageVersion`], the current storage version needs to be +/// communicated to the macro. This can be done by using the `pallet::storage_version` +/// attribute: /// /// ```ignore /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); @@ -1532,75 +1657,34 @@ pub mod pallet_prelude { /// /// If not present, the current storage version is set to the default value. /// -/// ### Macro expansion: -/// -/// The macro add this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replace the type `_` by `PhantomData`. -/// -/// It implements on pallet: -/// * [`traits::GetStorageVersion`] -/// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by [`construct_runtime`]. -/// -/// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet -/// informations given by [`frame_support::traits::PalletInfo`]. -/// (The implementation uses the associated type `frame_system::Config::PalletInfo`). -/// -/// It implements [`traits::StorageInfoTrait`] on `Pallet` which give information about all -/// storages. -/// -/// If the attribute generate_store is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute set_storage_max_encoded_len is set then the macro call -/// [`traits::StorageInfoTrait`] for each storage in the implementation of -/// [`traits::StorageInfoTrait`] for the pallet. -/// Otherwise it implements [`traits::StorageInfoTrait`] for the pallet using the -/// [`traits::PartialStorageInfoTrait`] implementation of storages. +/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) /// -/// # Hooks: `#[pallet::hooks]` optional +/// # Hooks: `#[pallet::hooks]` (optional) /// -/// Implementation of `Hooks` on `Pallet` allowing to define some specific pallet logic. 
+/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` +/// that specifies pallet-specific logic. /// -/// Item must be defined as +/// The item the attribute attaches to must be defined as follows: /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet $optional_where_clause { +/// ... /// } /// ``` /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` -/// and with an optional where clause. +/// `Hooks>` (they are defined in preludes), for the type `Pallet` and +/// with an optional where clause. /// -/// If no `#[pallet::hooks]` exists, then a default implementation corresponding to the -/// following code is automatically generated: +/// If no `#[pallet::hooks]` exists, then the following default implementation is +/// automatically generated: /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet {} /// ``` /// -/// ### Macro expansion: -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, `IntegrityTest` using `Hooks` implementation. -/// -/// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional -/// logic. E.g. logic to write pallet version into storage. +/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) /// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. -/// -/// # Call: `#[pallet::call]` optional +/// # Call: `#[pallet::call]` (optional) /// /// Implementation of pallet dispatchables. /// @@ -1622,53 +1706,50 @@ pub mod pallet_prelude { /// } /// ``` /// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// optional where clause. +/// an optional where clause. +/// +/// ## `#[pallet::weight($expr)]` +/// +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the +/// first argument must be `origin: OriginFor`. +/// +/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) +/// +/// ### `#[pallet::compact] $some_arg: $some_type` +/// +/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must +/// return a `DispatchResultWithPostInfo` or `DispatchResult`. /// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, -/// the first argument must be `origin: OriginFor`, compact encoding for argument can be -/// used using `#[pallet::compact]`, function must return `DispatchResultWithPostInfo` or -/// `DispatchResult`. +/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) +/// +/// ## `#[pallet::call_index($idx)]` /// /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which defines and sets the codec index for the dispatchable function in the `Call` enum. +/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// /// All call indexes start from 0, until it encounters a dispatchable function with a defined /// call index. The dispatchable function that lexically follows the function with a defined /// call index will have that call index, but incremented by 1, e.g. 
if there are 3
-/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` has
-/// a call index of 10, then `fn qux` will have an index of 11, instead of 1.
-///
-/// All arguments must implement `Debug`, `PartialEq`, `Eq`, `Decode`, `Encode`, `Clone`. For
-/// ease of use, bound the trait `Member` available in frame_support::pallet_prelude.
+/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar`
+/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1.
 ///
-/// If no `#[pallet::call]` exists, then a default implementation corresponding to the
-/// following code is automatically generated:
-/// ```ignore
-/// #[pallet::call]
-/// impl<T: Config> Pallet<T> {}
-/// ```
-///
-/// **WARNING**: modifying dispatchables, changing their order, removing some must be done with
-/// care. Indeed this will change the outer runtime call type (which is an enum with one
-/// variant per pallet), this outer runtime call can be stored on-chain (e.g. in
-/// pallet-scheduler). Thus migration might be needed. To mitigate against some of this, the
+/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be
+/// done with care. Indeed this will change the outer runtime call type (which is an enum with
+/// one variant per pallet); this outer runtime call can be stored on-chain (e.g. in
+/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the
 /// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so
-/// that the `Call` enum encoding does not change after modification.
-///
-/// ### Macro expansion
+/// that the `Call` enum encoding does not change after modification. As a general rule of
+/// thumb, it is therefore advantageous to always add new calls to the end so you can maintain
+/// the existing order of calls.
 ///
-/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements:
-/// `Clone`, `Eq`, `PartialEq`, `Debug` (with stripped implementation in `not("std")`),
-/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `UnfilteredDispatchable`.
+/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`)
 ///
-/// The macro implement the `Callable` trait on `Pallet` and a function `call_functions` which
-/// returns the dispatchable metadata.
+/// # Extra constants: `#[pallet::extra_constants]` (optional)
 ///
-/// # Extra constants: `#[pallet::extra_constants]` optional
-///
-/// Allow to define some extra constants to put into constant metadata.
+/// Allows you to define some extra constants to be added into constant metadata.
 ///
 /// Item must be defined as:
+
 /// ```ignore
 /// #[pallet::extra_constants]
 /// impl<T: Config> Pallet<T> where $optional_where_clause {
@@ -1679,19 +1760,23 @@
 /// ...
 /// }
 /// ```
-/// I.e. a regular rust implement block with some optional where clause and functions with 0
-/// args, 0 generics, and some return type.
+/// I.e. a regular Rust `impl` block with some optional where clause and functions with 0 args,
+/// 0 generics, and some return type.
 ///
-/// ### Macro expansion
+/// ## Macro expansion
 ///
-/// The macro add some extra constant to pallet constant metadata.
+/// The macro adds some extra constants to pallet constant metadata.
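+///
+/// As a hedged sketch (the constant, the `PalletId` config item, and the types here are
+/// hypothetical):
+///
+/// ```ignore
+/// #[pallet::extra_constants]
+/// impl<T: Config> Pallet<T> {
+///   /// The account ID holding the pallet's funds, exposed in the constant metadata.
+///   pub fn pallet_account() -> T::AccountId {
+///     T::PalletId::get().into_account_truncating()
+///   }
+/// }
+/// ```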
 ///
-/// # Error: `#[pallet::error]` optional
+/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`)
 ///
-/// Allow to define an error type to be return from dispatchable on error.
-/// This error type informations are put into metadata.
+/// # Error: `#[pallet::error]` (optional)
+///
+/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned
+/// from the dispatchable when an error occurs. The information for this error type is then
+/// stored in metadata.
 ///
 /// Item must be defined as:
+
 /// ```ignore
 /// #[pallet::error]
 /// pub enum Error<T> {
@@ -1702,7 +1787,7 @@
 /// ...
 /// }
 /// ```
-/// I.e. a regular rust enum named `Error`, with generic `T` and fieldless or multiple-field
+/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field
 /// variants.
 ///
 /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be
@@ -1710,29 +1795,28 @@
 /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an
 /// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`].
 ///
+/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to
+/// diagnose problems and give a better experience to the user. Don't skimp on having lots of
+/// individual error conditions.)
+///
 /// Field types in enum variants must also implement [`PalletError`](traits::PalletError),
 /// otherwise the pallet will fail to compile. Rust primitive types have already implemented
 /// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types
-/// such as `Option` and `PhantomData`, and hence in most use cases, a manual implementation is
-/// not necessary and is discouraged.
+/// such as [`Option`] and [`PhantomData`](`frame_support::dispatch::marker::PhantomData`), and
+/// hence in most use cases, a manual implementation is not necessary and is discouraged.
 ///
-/// The generic `T` mustn't bound anything and where clause is not allowed. But bounds and
-/// where clause shouldn't be needed for any usecase.
+/// The generic `T` must not bound anything and a `where` clause is not allowed. That said,
+/// bounds and/or a where clause should not be needed for any use-case.
 ///
-/// ### Macro expansion
+/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`)
 ///
-/// The macro implements `Debug` trait and functions `as_u8` using variant position, and
-/// `as_str` using variant doc.
+/// # Event: `#[pallet::event]` (optional)
 ///
-/// The macro implements `From<Error<T>>` for `&'static str`.
-/// The macro implements `From<Error<T>>` for `DispatchError`.
+/// Allows you to define pallet events. Pallet events are stored under the `system` / `events`
+/// key when the block is applied (and then replaced when the next block writes its events).
 ///
-/// # Event: `#[pallet::event]` optional
+/// The `Event` enum must be defined as follows:
 ///
-/// Allow to define pallet events, pallet events are stored in the block when they deposited
-/// (and removed in next block).
-///
-/// Item is defined as:
 /// ```ignore
 /// #[pallet::event]
 /// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional
@@ -1742,157 +1826,174 @@
 /// ...
 /// }
 /// ```
-/// I.e. an enum (with named or unnamed fields variant), named Event, with generic: none or `T`
-/// or `T: Config`, and optional where clause.
 ///
-/// Each field must implement `Clone`, `Eq`, `PartialEq`, `Encode`, `Decode`, and `Debug` (on
-/// std only).
-/// For ease of use, bound the trait `Member` available in frame_support::pallet_prelude.
+/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or
+/// `T` or `T: Config`, and optional where clause.
 ///
-/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generate a helper
-/// function on `Pallet` to deposit event.
+/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and
+/// [`Debug`] (on std only). For ease of use, bound by the trait
+/// [`Member`](`frame_support::pallet_prelude::Member`), available in
+/// `frame_support::pallet_prelude`.
 ///
-/// NOTE: For instantiable pallet, event must be generic over T and I.
+/// Also see [`pallet::event`](`frame_support::pallet_macros::event`)
 ///
-/// ### Macro expansion:
+/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]`
 ///
-/// Macro will add on enum `Event` the attributes:
-/// * `#[derive(frame_support::CloneNoBound)]`,
-/// * `#[derive(frame_support::EqNoBound)]`,
-/// * `#[derive(frame_support::PartialEqNoBound)]`,
-/// * `#[derive(codec::Encode)]`,
-/// * `#[derive(codec::Decode)]`,
-/// * `#[derive(frame_support::RuntimeDebugNoBound)]`
+/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a
+/// helper function on `Pallet` that handles deposit events.
 ///
-/// Macro implements `From<Event<T>>` for ().
+/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`.
 ///
-/// Macro implements metadata function on `Event` returning the `EventMetadata`.
+/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`)
 ///
-/// If `#[pallet::generate_deposit]` then macro implement `fn deposit_event` on `Pallet`.
+/// # Storage: `#[pallet::storage]` (optional)
 ///
-/// # Storage: `#[pallet::storage]` optional
+/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime
+/// storage and also set its metadata. This attribute can be used multiple times.
 ///
-/// Allow to define some abstract storage inside runtime storage and also set its metadata.
-/// This attribute can be used multiple times.
+/// Item should be defined as:
 ///
-/// Item is defined as:
 /// ```ignore
 /// #[pallet::storage]
 /// #[pallet::getter(fn $getter_name)] // optional
 /// $vis type $StorageName<$some_generic> $optional_where_clause
 /// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>;
 /// ```
-/// or with unnamed generic
+///
+/// or with unnamed generic:
+///
 /// ```ignore
 /// #[pallet::storage]
 /// #[pallet::getter(fn $getter_name)] // optional
 /// $vis type $StorageName<$some_generic> $optional_where_clause
 /// = $StorageType<_, $some_generics, ...>;
 /// ```
-/// I.e. it must be a type alias, with generics: `T` or `T: Config`, aliased type must be one
-/// of `StorageValue`, `StorageMap` or `StorageDoubleMap` (defined in frame_support).
-/// The generic arguments of the storage type can be given in two manner: named and unnamed.
-/// For named generic argument: the name for each argument is the one as define on the storage
-/// struct:
-/// * [`pallet_prelude::StorageValue`] expect `Value` and optionally `QueryKind` and `OnEmpty`,
-/// * [`pallet_prelude::StorageMap`] expect `Hasher`, `Key`, `Value` and optionally `QueryKind`
-/// and `OnEmpty`,
-/// * [`pallet_prelude::CountedStorageMap`] expect `Hasher`, `Key`, `Value` and optionally
-/// `QueryKind` and `OnEmpty`,
-/// * [`pallet_prelude::StorageDoubleMap`] expect `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value`
-/// and optionally `QueryKind` and `OnEmpty`.
-///
-/// For unnamed generic argument: Their first generic must be `_` as it is replaced by the
-/// macro and other generic must declared as a normal declaration of type generic in rust.
-///
-/// The Prefix generic written by the macro is generated using
-/// `PalletInfo::name::<Pallet<T>>()` and the name of the storage type.
-/// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the
-/// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`.
///
-/// For the `CountedStorageMap` variant, the Prefix also implements
-/// `CountedStorageMapInstance`. It associate a `CounterPrefix`, which is implemented same as
-/// above, but the storage prefix is prepend with `"CounterFor"`.
-/// E.g. if runtime names the pallet "MyExample" then the storage
-/// `type Foo = CountedStorageaMap<...>` will store its counter at the prefix:
-/// `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`.
+/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be
+/// one of [`StorageValue`](`pallet_prelude::StorageValue`),
+/// [`StorageMap`](`pallet_prelude::StorageMap`) or
+/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the
+/// storage type can be given in two manners: named and unnamed. For named generic arguments,
+/// the name for each argument should match the name defined for it on the storage struct:
+/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally
+/// `QueryKind` and `OnEmpty`,
+/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and
+/// optionally `QueryKind` and `OnEmpty`,
+/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`,
+/// `Value` and optionally `QueryKind` and `OnEmpty`,
+/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`,
+/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`.
+///
+/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the
+/// macro, and the other generics must be declared as in a normal generic type declaration.
+///
+/// The `Prefix` generic written by the macro is generated using
+/// `PalletInfo::name::<Pallet<T>>()` and the name of the storage type. E.g. if runtime names
+/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix:
+/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`.
+///
+/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix`
+/// also implements
+/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`).
+/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix`), which is
+/// implemented the same as above, but the storage prefix is prepended with `"CounterFor"`. E.g.
+/// if runtime names the pallet "MyExample" then the storage `type Foo =
+/// CountedStorageMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++
+/// Twox128(b"CounterForFoo")`.
///
/// E.g:
+///
/// ```ignore
/// #[pallet::storage]
/// pub(super) type MyStorage<T> = StorageMap<Blake2_128Concat, u32, u32>;
/// ```
-/// In this case the final prefix used by the map is
-/// `Twox128(b"MyExample") ++ Twox128(b"OtherName")`.
///
-/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows to define a
+/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++
+/// Twox128(b"OtherName")`.
+///
+/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`)
+///
+/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional)
+///
+/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a
/// getter function on `Pallet`.
///
-/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allow to define the storage
-/// prefix to use, see how `Prefix` generic is implemented above.
+/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`)
+///
+/// ## `#[pallet::storage_prefix = "SomeName"]` (optional)
+///
+/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the
+/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if
+/// you wish to rename the storage field but don't want to perform a migration.
///
/// E.g:
+///
/// ```ignore
/// #[pallet::storage]
/// #[pallet::storage_prefix = "foo"]
/// #[pallet::getter(fn my_storage)]
/// pub(super) type MyStorage<T> = StorageMap<Hasher = Blake2_128Concat, Key = u32, Value = u32>;
/// ```
+///
/// or
+///
/// ```ignore
/// #[pallet::storage]
/// #[pallet::getter(fn my_storage)]
/// pub(super) type MyStorage<T> = StorageMap<_, Blake2_128Concat, u32, u32>;
/// ```
///
-/// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded.
-/// When implementating the storage info (when `#[pallet::generate_storage_info]` is specified
-/// on the pallet struct placeholder), the size of the storage will be declared as unbounded.
-/// This can be useful for storage which can never go into PoV (Proof of Validity).
+/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`)
+///
+/// ## `#[pallet::unbounded]` (optional)
+///
+/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When
+/// implementing the storage info (when `#[pallet::generate_storage_info]` is specified on
+/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This
+/// can be useful for storage which can never go into PoV (Proof of Validity).
+///
+/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`)
+///
+/// ## `#[pallet::whitelist_storage]` (optional)
///
+/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as
+/// whitelisted from benchmarking.
+///
+/// See
+/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage)
+/// for more info.
+///
+/// ## `#[cfg(..)]` (for storage)
+///
/// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage.
///
/// E.g:
+///
/// ```ignore
/// #[cfg(feature = "my-feature")]
/// #[pallet::storage]
/// pub(super) type MyStorage<T> = StorageValue<Value>;
/// ```
///
-/// The optional attribute `#[pallet::whitelist_storage]` will declare the
-/// storage as whitelisted from benchmarking.
Doing so will exclude reads of
-/// that value's storage key from counting towards weight calculations during
-/// benchmarking.
-///
-/// This attribute should only be attached to storages that are known to be
-/// read/used in every block. This will result in a more accurate benchmarking weight.
-///
-/// ### Example
-/// ```ignore
-/// #[pallet::storage]
-/// #[pallet::whitelist_storage]
-/// pub(super) type Number<T: Config> = StorageValue<_, T::BlockNumber, ValueQuery>;
-/// ```
-///
/// All the `cfg` attributes are automatically copied to the items generated for the storage,
/// i.e. the getter, storage prefix, and the metadata element etc.
///
/// Any type placed as the `QueryKind` parameter must implement
/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this
/// trait by default:
-/// 1. [`frame_support::storage::types::OptionQuery`], the default `QueryKind` used when this
-/// type parameter is omitted. Specifying this as the `QueryKind` would cause storage map
-/// APIs that return a `QueryKind` to instead return an `Option`, returning `Some` when a
-/// value does exist under a specified storage key, and `None` otherwise.
-/// 2. [`frame_support::storage::types::ValueQuery`] causes storage map APIs that return a
-/// `QueryKind` to instead return the value type. In cases where a value does not exist
-/// under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is used
-/// to return an appropriate value.
-/// 3. [`frame_support::storage::types::ResultQuery`] causes storage map APIs that return a
-/// `QueryKind` to instead return a `Result<T, E>`, with `T` being the value type and `E`
-/// being the pallet error type specified by the `#[pallet::error]` attribute. In cases
-/// where a value does not exist under a specified storage key, an `Err` with the specified
-/// pallet error variant is returned.
+///
+/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind`
+/// used when this type parameter is omitted. Specifying this as the `QueryKind` would cause
+/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning
+/// `Some` when a value does exist under a specified storage key, and `None` otherwise.
+/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that
+/// return a `QueryKind` to instead return the value type. In cases where a value does not
+/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is
+/// used to return an appropriate value.
+/// 3. [`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs
+/// that return a `QueryKind` to instead return a `Result<T, E>`, with `T` being the value
+/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute.
+/// In cases where a value does not exist under a specified storage key, an `Err` with the
+/// specified pallet error variant is returned.
///
/// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some
/// type alias then the generation of the getter might fail. In this case the getter can be
@@ -1902,60 +2003,62 @@ pub mod pallet_prelude {
/// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the
/// storage item. Thus generic hasher is supported.
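+///
+/// As a rough, non-authoritative sketch of how the three `QueryKind` flavours look in a
+/// declaration (the storage names and the `SomethingMissing` error variant below are
+/// hypothetical, purely for illustration):
+///
+/// ```ignore
+/// // `OptionQuery` is the default: reads return `Option<u32>`.
+/// #[pallet::storage]
+/// pub type OptionalValue<T> = StorageValue<_, u32>;
+///
+/// // `ValueQuery`: reads return `u32`, falling back to `OnEmpty` (here the type default).
+/// #[pallet::storage]
+/// pub type DefaultedValue<T> = StorageValue<_, u32, ValueQuery>;
+///
+/// // `ResultQuery`: reads return a `Result`, `Err`-ing with the given variant when unset.
+/// #[pallet::storage]
+/// pub type CheckedValue<T> = StorageValue<_, u32, ResultQuery<Error<T>::SomethingMissing>>;
+/// ```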
///
-/// ### Macro expansion
+/// ## Macro expansion
///
/// For each storage item the macro generates a struct named
/// `_GeneratedPrefixForStorage$NameOfStorage`, and implements
/// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It
-/// then uses it as the first generic of the aliased type.
-/// For `CountedStorageMap`, `CountedStorageMapInstance` is implemented, and another similar
-/// struct is generated.
+/// then uses it as the first generic of the aliased type. For
+/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`),
+/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`)
+/// is implemented, and another similar struct is generated.
///
-/// For named generic, the macro will reorder the generics, and remove the names.
+/// For a named generic, the macro will reorder the generics, and remove the names.
///
-/// The macro implements the function `storage_metadata` on `Pallet` implementing the metadata
-/// for all storage items based on their kind:
+/// The macro implements the function `storage_metadata` on the `Pallet`, implementing the
+/// metadata for all storage items based on their kind:
/// * for a storage value, the type of the value is copied into the metadata
/// * for a storage map, the type of the values and the key's type are copied into the metadata
-/// * for a storage double map, the type of the values, and the types of key1 and key2 are
+/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are
/// copied into the metadata.
///
-/// # Type value: `#[pallet::type_value]` optional
+/// # Type value: `#[pallet::type_value]` (optional)
+///
+/// The `#[pallet::type_value]` attribute lets you define a struct implementing the
+/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to
+/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a
+/// storage's default value. This attribute can be used multiple times.
///
-/// Helper to define a struct implementing `Get` trait. To ease use of storage types.
-/// This attribute can be used multiple time.
+/// Item must be defined as:
///
-/// Item is defined as
/// ```ignore
/// #[pallet::type_value]
/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr }
/// ```
+///
/// I.e.: a function definition with generics none or `T: Config` and a returned type.
///
/// E.g.:
+///
/// ```ignore
/// #[pallet::type_value]
/// fn MyDefault<T: Config>() -> T::Balance { 3.into() }
/// ```
///
-/// NOTE: This attribute is meant to be used alongside `#[pallet::storage]` to defined some
-/// specific default value in storage.
-///
-/// ### Macro expansion
-///
-/// Macro renames the function to some internal name, generate a struct with the original name
-/// of the function and its generic, and implement `Get<$ReturnType>` by calling the user
-/// defined function.
+/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`)
///
-/// # Genesis config: `#[pallet::genesis_config]` optional
+/// # Genesis config: `#[pallet::genesis_config]` (optional)
///
-/// Allow to define the genesis configuration of the pallet.
+/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration
+/// for the pallet.
///
-/// Item is defined as either an enum or a struct.
-/// It needs to be public and implement trait GenesisBuild with `#[pallet::genesis_build]`.
-/// The type generics is constrained to be either none, or `T` or `T: Config`.
+/// Item is defined as either an enum or a struct. It needs to be public and implement the
+/// trait [`GenesisBuild`](`traits::GenesisBuild`) with
+/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type
+/// generics are constrained to be either none, or `T` or `T: Config`.
///
/// E.g:
+///
/// ```ignore
/// #[pallet::genesis_config]
/// pub struct GenesisConfig<T: Config> {
@@ -1963,31 +2066,28 @@ pub mod pallet_prelude {
/// }
/// ```
///
-/// ### Macro expansion
+/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`)
///
-/// Macro will add the following attribute on it:
-/// * `#[cfg(feature = "std")]`
-/// * `#[derive(Serialize, Deserialize)]`
-/// * `#[serde(rename_all = "camelCase")]`
-/// * `#[serde(deny_unknown_fields)]`
-/// * `#[serde(bound(serialize = ""))]`
-/// * `#[serde(bound(deserialize = ""))]`
+/// # Genesis build: `#[pallet::genesis_build]` (optional)
///
-/// # Genesis build: `#[pallet::genesis_build]` optional
+/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration`
+/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the
+/// pallet's initial state.
///
-/// Allow to define how genesis_configuration is built.
+/// The impl must be defined as:
///
-/// Item is defined as
/// ```ignore
/// #[pallet::genesis_build]
/// impl<T: Config> GenesisBuild<T> for GenesisConfig<$maybe_generics> {
/// fn build(&self) { $expr }
/// }
/// ```
-/// I.e. a rust trait implementation with generic `T: Config`, of trait `GenesisBuild<T>` on
+///
+/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild<T>` on
/// type `GenesisConfig` with generics none or `T`.
///
/// E.g.:
+///
/// ```ignore
/// #[pallet::genesis_build]
/// impl<T: Config> GenesisBuild<T> for GenesisConfig {
@@ -1995,87 +2095,93 @@ pub mod pallet_prelude {
/// }
/// ```
///
-/// ### Macro expansion
+/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`)
///
-/// Macro will add the following attribute on it:
-/// * `#[cfg(feature = "std")]`
+/// # Inherent: `#[pallet::inherent]` (optional)
///
-/// Macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as second generic
-/// for non-instantiable pallets.
+/// The `#[pallet::inherent]` attribute allows the pallet to provide some
+/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions).
+/// An inherent is some piece of data that is inserted by a block authoring node at block
+/// creation time and can either be accepted or rejected by validators based on whether the
+/// data falls within an acceptable range.
///
-/// # Inherent: `#[pallet::inherent]` optional
+/// The most common inherent is the `timestamp` that is inserted into every block. Since there
+/// is no way to validate timestamps, validators simply check that the timestamp reported by
+/// the block authoring node falls within an acceptable range.
///
-/// Allow the pallet to provide some inherent:
+/// Item must be defined as:
///
-/// Item is defined as:
/// ```ignore
/// #[pallet::inherent]
/// impl<T: Config> ProvideInherent for Pallet<T> {
/// // ... regular trait implementation
/// }
/// ```
-/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type
-/// `Pallet<T>`, and some optional where clause.
///
-/// ### Macro expansion
+/// I.e.
a trait implementation with bound `T: Config`, of trait
+/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet<T>`, and some
+/// optional where clause.
+///
+/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`)
///
-/// Macro make currently no use of this information, but it might use this information in the
-/// future to give information directly to construct_runtime.
+/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional)
///
-/// # Validate unsigned: `#[pallet::validate_unsigned]` optional
+/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned
+/// transaction.
///
-/// Allow the pallet to validate some unsigned transaction:
+/// Item must be defined as:
///
-/// Item is defined as:
/// ```ignore
/// #[pallet::validate_unsigned]
/// impl<T: Config> ValidateUnsigned for Pallet<T> {
/// // ... regular trait implementation
/// }
/// ```
-/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type
-/// `Pallet<T>`, and some optional where clause.
///
-/// NOTE: There is also `sp_runtime::traits::SignedExtension` that can be used to add some
-/// specific logic for transaction validation.
+/// I.e. a trait implementation with bound `T: Config`, of trait
+/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet<T>`, and some
+/// optional where clause.
///
-/// ### Macro expansion
+/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to
+/// add some specific logic for transaction validation.
///
-/// Macro make currently no use of this information, but it might use this information in the
-/// future to give information directly to construct_runtime.
+/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`)
///
-/// # Origin: `#[pallet::origin]` optional
+/// # Origin: `#[pallet::origin]` (optional)
///
-/// Allow to define some origin for the pallet.
+/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet.
///
-/// Item must be either a type alias or an enum or a struct. It needs to be public.
+/// Item must be either a type alias, an enum, or a struct. It needs to be public.
///
/// E.g.:
+///
/// ```ignore
/// #[pallet::origin]
/// pub struct Origin<T>(PhantomData<(T)>);
/// ```
///
/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin
-/// can be stored on-chain (e.g. in pallet-scheduler), thus any change must be done with care
+/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care
/// as it might require some migration.
///
-/// NOTE: for instantiable pallet, origin must be generic over T and I.
+/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`.
///
-/// # General notes on instantiable pallet
+/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`)
///
-/// An instantiable pallet is one where Config is generic, i.e. `Config<I>`. This allow runtime
-/// to implement multiple instance of the pallet, by using different type for the generic.
-/// This is the sole purpose of the generic `I`.
-/// But because `PalletInfo` requires `Pallet` placeholder to be static it is important to
-/// bound `'static` whenever `PalletInfo` can be used.
-/// And in order to have instantiable pallet usable as a regular pallet without instance, it is
-/// important to bound `= ()` on every types.
+/// # General notes on instantiable pallets
///
-/// Thus impl bound look like `impl<T: Config<I>, I: 'static>`, and types look like
+/// An instantiable pallet is one where Config is generic, i.e. `Config<I>`. This allows
+/// runtime to implement multiple instances of the pallet, by using different types for the
+/// generic. This is the sole purpose of the generic `I`, but because
+/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is
+/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used.
+/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an
+/// instance, it is important to bound by `= ()` on every type.
+///
+/// Thus impl bound looks like `impl<T: Config<I>, I: 'static>`, and types look like
/// `SomeType<T>` or `SomeType<T: Config<I>, I: 'static = ()>`.
///
-/// # Example for pallet without instance.
+/// # Example of a non-instantiable pallet
///
/// ```
/// pub use pallet::*; // reexport in crate namespace for `construct_runtime!`
@@ -2269,7 +2375,7 @@ pub mod pallet_prelude {
/// }
/// ```
///
-/// # Example for pallet with instance.
+/// # Example of an instantiable pallet
///
/// ```
/// pub use pallet::*;
@@ -2400,28 +2506,28 @@ pub mod pallet_prelude {
/// }
/// ```
///
-/// ## Upgrade guidelines:
+/// # Upgrade guidelines
///
/// 1. Export the metadata of the pallet for later checks
/// - run your node with the pallet active
/// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p
/// > meta.json`
-/// 2. generate the template upgrade for the pallet provided by decl_storage
-/// with environment variable `PRINT_PALLET_UPGRADE`:
-/// `PRINT_PALLET_UPGRADE=1 cargo check -p my_pallet` This template can be
-/// used as information it contains all information for storages, genesis
-/// config and genesis build.
-/// 3. reorganize pallet to have trait `Config`, `decl_*` macros, `ValidateUnsigned`,
-/// `ProvideInherent`, `Origin` all together in one file. Suggested order:
-/// * Config,
-/// * decl_module,
-/// * decl_event,
-/// * decl_error,
-/// * decl_storage,
-/// * origin,
-/// * validate_unsigned,
-/// * provide_inherent,
-/// so far it should compile and all be correct.
+/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the
+/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p
+/// my_pallet`. This template can be used as it contains all information for storages,
+/// genesis config and genesis build.
+/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros,
+/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`),
+/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and `Origin` all together in one
+/// file. Suggested order:
+/// * `Config`,
+/// * `decl_module`,
+/// * `decl_event`,
+/// * `decl_error`,
+/// * `decl_storage`,
+/// * `origin`,
+/// * `validate_unsigned`,
+/// * `provide_inherent`, so far it should compile and all be correct.
/// 4. start writing the new pallet module
/// ```ignore
/// pub use pallet::*;
@@ -2441,16 +2547,17 @@ pub mod pallet_prelude {
/// }
/// ```
/// 5. **migrate Config**: move trait into the module with
-/// * all const in decl_module to `#[pallet::constant]`
-/// * add bound `IsType<<Self as frame_system::Config>::RuntimeEvent>` to `type RuntimeEvent`
+/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant)
+/// * add the bound `IsType<<Self as frame_system::Config>::RuntimeEvent>` to `type
+/// RuntimeEvent`
/// 6.
**migrate decl_module**: write:
/// ```ignore
/// #[pallet::hooks]
/// impl<T: Config> Hooks for Pallet<T> {
/// }
/// ```
-/// and write inside
-/// `on_initialize`, `on_finalize`, `on_runtime_upgrade`, `offchain_worker`, `integrity_test`.
+/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`,
+/// `offchain_worker`, and `integrity_test`.
///
/// then write:
/// ```ignore
@@ -2458,25 +2565,26 @@ pub mod pallet_prelude {
/// impl<T: Config> Pallet<T> {
/// }
/// ```
-/// and write inside all the calls in decl_module with a few changes in the signature:
-/// - origin must now be written completely, e.g. `origin: OriginFor<T>`
-/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you
-/// might
-/// need to put `Ok(().into())` at the end or the function.
-/// - `#[compact]` must now be written `#[pallet::compact]`
-/// - `#[weight = ..]` must now be written `#[pallet::weight(..)]`
-///
-/// 7. **migrate event**:
-/// rewrite as a simple enum under with the attribute `#[pallet::event]`,
-/// use `#[pallet::generate_deposit($vis fn deposit_event)]` to generate deposit_event,
-/// 8. **migrate error**: rewrite it with attribute `#[pallet::error]`.
-/// 9. **migrate storage**:
-/// decl_storage provide an upgrade template (see 3.). All storages, genesis config, genesis
-/// build and default implementation of genesis config can be taken from it directly.
-///
-/// Otherwise here is the manual process:
-///
-/// first migrate the genesis logic. write:
+/// and write inside all the calls in `decl_module` with a few changes in the signature:
+/// - origin must now be written completely, e.g. `origin: OriginFor<T>`
+/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you
+/// might need to put `Ok(().into())` at the end of the function.
+/// - `#[compact]` must now be written
+/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type)
+/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr)
+///
+/// 7. **migrate event**: rewrite as a simple enum with the attribute
+/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis
+/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`,
+/// 8. **migrate error**: rewrite it with attribute
+/// [`#[pallet::error]`](#error-palleterror-optional).
+/// 9. **migrate storage**: `decl_storage` provides an upgrade template (see 3.). All storages,
+/// genesis config, genesis build and default implementation of genesis config can be
+/// taken from it directly.
+///
+/// Otherwise here is the manual process:
+///
+/// first migrate the genesis logic. write:
/// ```ignore
/// #[pallet::genesis_config]
/// struct GenesisConfig<T: Config> {
@@ -2494,79 +2602,85 @@ pub mod pallet_prelude {
/// }
/// }
/// ```
-/// for each storages, if it contains config(..) then add a fields, and make its default to the
-/// value in `= ..;` or the type default if none, if it contains no build then also add the
-/// logic to build the value.
-/// for each storages if it contains build(..) then add the logic to genesis_build.
-///
-/// NOTE: in decl_storage: is executed first the individual config and build and at the end the
-/// add_extra_genesis build
-///
-/// Once this is done you can migrate storage individually, a few notes:
-/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing,
-/// - for storage with `get(fn ..)` use `#[pallet::getter(fn ...)]`
-/// - for storage with value being `Option<$something>` make generic `Value` being
-/// `$something`
-/// and generic `QueryKind` being `OptionQuery` (note: this is default). Otherwise make
-/// `Value` the complete value type and `QueryKind` being `ValueQuery`.
-/// - for storage with default value: `= $expr;` provide some specific OnEmpty generic. To do
-/// so
-/// use of `#[pallet::type_value]` to generate the wanted struct to put.
-/// example: `MyStorage: u32 = 3u32` would be written:
-/// ```ignore
+/// for each storage, if it contains `config(..)` then add fields, and make its default the
+/// value in `= ..;` or the type default if none; if it contains no build then also add
+/// the logic to build the value. For each storage, if it contains `build(..)` then add the
+/// logic to `genesis_build`.
+///
+/// NOTE: within `decl_storage`: the individual config is executed first, followed by the
+/// build and finally the `add_extra_genesis` build.
+///
+/// Once this is done you can migrate storages individually, a few notes:
+/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing,
+/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn
+/// ...)]`](#palletgetterfn-my_getter_fn_name-optional)
+/// - for storages with value being `Option<$something>` make generic `Value` being
+/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default).
+/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`.
+/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic.
+/// To do so, use `#[pallet::type_value]` to generate the wanted struct to put.
+/// example: `MyStorage: u32 = 3u32` would be written:
+///
+/// ```ignore
/// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 }
/// #[pallet::storage]
/// pub(super) type MyStorage<T> = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>;
/// ```
///
-/// NOTE: `decl_storage` also generates functions `assimilate_storage` and `build_storage`
-/// directly on GenesisConfig, those are sometimes used in tests. In order not to break they
-/// can be implemented manually, one can implement those functions by calling `GenesisBuild`
-/// implementation.
-///
-/// 10. **migrate origin**: move the origin to the pallet module under `#[pallet::origin]`
-/// 11. **migrate validate_unsigned**: move the `ValidateUnsigned` implementation to the pallet
-/// module under `#[pallet::validate_unsigned]`
-/// 12. **migrate provide_inherent**: move the `ProvideInherent` implementation to the pallet
-/// module under `#[pallet::inherent]`
+/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and
+/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests.
+/// In order not to break them, these functions can be implemented manually by calling
+/// the `GenesisBuild` implementation.
+/// 10. **migrate origin**: move the origin to the pallet module to be under a
+/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute
+/// 11.
**migrate validate_unsigned**: move the
+/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet
+/// module under a
+/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional)
+/// attribute
+/// 12. **migrate provide_inherent**: move the
+/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet
+/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute
/// 13. rename the usage of `Module` to `Pallet` inside the crate.
-/// 14. migration is done, now double check migration with the checking migration guidelines.
+/// 14. migration is done, now double check the migration with the checking migration
+/// guidelines shown below.
///
-/// ## Checking upgrade guidelines:
+/// # Checking upgrade guidelines
///
/// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata
-/// and do a diff of the resulting json before and after migration. This checks for:
-/// * call, names, signature, docs
-/// * event names, docs
-/// * error names, docs
-/// * storage names, hasher, prefixes, default value
-/// * error , error, constant,
+/// and do a diff of the resulting json before and after migration. This checks for:
+/// * call names, signatures, docs
+/// * event names, docs
+/// * error names, docs
+/// * storage names, hasher, prefixes, default value
+/// * errors, constants
/// * manually check that:
-/// * `Origin` is moved inside the macro under `#[pallet::origin]` if it exists
-/// * `ValidateUnsigned` is moved inside the macro under `#[pallet::validate_unsigned)]` if it
-/// exists
-/// * `ProvideInherent` is moved inside macro under `#[pallet::inherent)]` if it exists
-/// * `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker` are moved to
-/// `Hooks`
-/// implementation
-/// * storages with `config(..)` are converted to `GenesisConfig` field, and their default is
-/// `= $expr;` if the storage have default value
-/// * storages with `build($expr)` or `config(..)` are built in `GenesisBuild::build`
-/// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct
-/// default if specified
-/// * `add_extra_genesis` build is written into `GenesisBuild::build`
+/// * `Origin` was moved inside the macro under
+/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists
+/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro
+/// under
+/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional)
+/// if it exists
+/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro
+/// under [`#[pallet::inherent]`](#inherent-palletinherent-optional) if it exists
+/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved
+/// to the `Hooks` implementation
+/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default
+/// is `= $expr;` if the storage has a default value
+/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build`
+/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct
+/// default if specified
+/// * `add_extra_genesis` build was written into `GenesisBuild::build`
/// * storage items defined with [`pallet`] use the name of the pallet provided by
-/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used the
-///
`pallet_prefix` given as input of `decl_storage` with the syntax `as Example`).
-/// Thus a runtime using the pallet must be careful with this change.
-/// To handle this change:
-/// * either ensure that the name of the pallet given to `construct_runtime!` is the same
-/// as the name the pallet was giving to `decl_storage`,
-/// * or do a storage migration from the old prefix used to the new prefix used.
-///
-/// NOTE: The prefixes used by storage items are in the metadata. Thus, ensuring the metadata
-/// hasn't changed does ensure that the `pallet_prefix`s used by the storage items haven't
-/// changed.
+/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used
+/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus
+/// a runtime using the pallet must be careful with this change. To handle this change:
+/// * either ensure that the name of the pallet given to `construct_runtime!` is the same
+/// as the name the pallet gave to `decl_storage`,
+/// * or do a storage migration from the old prefix used to the new prefix used.
+///
+/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata
+/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed.
///
/// # Notes when macro fails to show proper error message spans:
///
@@ -2581,3 +2695,13 @@ pub mod pallet_prelude {
/// ```
/// * use the newest nightly possible.
pub use frame_support_procedural::pallet;
+
+/// Contains macro stubs for all of the `pallet::` macros
+pub mod pallet_macros {
+	pub use frame_support_procedural::{
+		call_index, compact, config, constant, disable_frame_system_supertrait_check, error, event,
+		extra_constants, generate_deposit, generate_storage_info, generate_store, genesis_build,
+		genesis_config, getter, hooks, inherent, origin, storage, storage_prefix, storage_version,
+		type_value, unbounded, validate_unsigned, weight, whitelist_storage,
+	};
+}

From 4a5a9dea00c9b4e4d34ff56368451aa4dac09d77 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Fri, 7 Oct 2022 12:46:57 +0200
Subject: [PATCH 50/75] Upgrade pin-project (#12426)

This fixes some warnings on latest nightly.
--- Cargo.lock | 8 ++++---- client/network/Cargo.toml | 2 +- client/network/transactions/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6a7c09514325..6cb440433f204 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6805,18 +6805,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 8e3d68851c423..08d0f28394af0 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -32,7 +32,7 @@ linked-hash-map = "0.5.4" log = "0.4.17" lru = "0.7.5" parking_lot = "0.12.1" -pin-project = "1.0.10" +pin-project = "1.0.12" prost = "0.11" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index 5578bb2c7191e..3b60497d42b9b 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3.21" hex = "0.4.0" libp2p = "0.46.1" log = "0.4.17" -pin-project = "1.0.10" +pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 42bd2ae7276e2..308da96fbbe77 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -31,7 +31,7 @@ parking_lot = "0.12.1" log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" -pin-project = "1.0.10" +pin-project = "1.0.12" hash-db = "0.15.2" serde = "1.0.136" serde_json = "1.0.85" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 4be7c186720fc..0be1268e13d43 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" parking_lot = "0.12.1" -pin-project = "1.0.10" +pin-project = "1.0.12" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" From a8e951d094273322e79f4ac8b66dd3715fd5c076 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 7 Oct 2022 23:16:41 +0900 Subject: [PATCH 51/75] Extend the lower bounds of some of the benchmarks to also include `0` (#12386) * Extend the lower bounds of some of the benchmarks to also include `0` * Fix verify snippet for `pallet_bounties/spend_funds` --- frame/alliance/src/benchmarking.rs | 8 +- frame/benchmarking/src/baseline.rs | 2 +- frame/bounties/src/benchmarking.rs | 10 ++- frame/collective/src/benchmarking.rs | 77 ++++++++++---------- 
 frame/elections-phragmen/src/benchmarking.rs | 2 +-
 frame/examples/basic/src/benchmarking.rs | 8 +-
 frame/gilt/src/benchmarking.rs | 4 +-
 frame/identity/src/benchmarking.rs | 24 +++---
 frame/ranked-collective/src/benchmarking.rs | 2 +-
 frame/staking/src/benchmarking.rs | 11 +--
 frame/system/benchmarking/src/lib.rs | 51 ++++++++-----
 11 files changed, 109 insertions(+), 90 deletions(-)

diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs
index e07d7c44a97ff..e2e1579fcc9b4 100644
--- a/frame/alliance/src/benchmarking.rs
+++ b/frame/alliance/src/benchmarking.rs
@@ -832,8 +832,8 @@ benchmarks_instance_pallet! {
 }

 add_unscrupulous_items {
- let n in 1 .. T::MaxUnscrupulousItems::get();
- let l in 1 .. T::MaxWebsiteUrlLength::get();
+ let n in 0 .. T::MaxUnscrupulousItems::get();
+ let l in 0 .. T::MaxWebsiteUrlLength::get();

 set_members::<T, I>();

@@ -856,8 +856,8 @@ benchmarks_instance_pallet! {
 }

 remove_unscrupulous_items {
- let n in 1 .. T::MaxUnscrupulousItems::get();
- let l in 1 .. T::MaxWebsiteUrlLength::get();
+ let n in 0 .. T::MaxUnscrupulousItems::get();
+ let l in 0 .. T::MaxWebsiteUrlLength::get();

 set_members::<T, I>();

diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs
index 6a310330cafb1..5fd845551daca 100644
--- a/frame/benchmarking/src/baseline.rs
+++ b/frame/benchmarking/src/baseline.rs
@@ -90,7 +90,7 @@ benchmarks! {
 }

 sr25519_verification {
- let i in 1 .. 100;
+ let i in 0 .. 100;

 let public = SignerId::generate_pair(None);

diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs
index 6ccd587cebc10..07dd781c29af3 100644
--- a/frame/bounties/src/benchmarking.rs
+++ b/frame/bounties/src/benchmarking.rs
@@ -197,7 +197,7 @@ benchmarks_instance_pallet! {
 }

 spend_funds {
- let b in 1 .. 100;
+ let b in 0 .. 100;

 setup_pot_account::<T, I>();
 create_approved_bounties::<T, I>(b)?;
@@ -214,9 +214,13 @@
 );
 }
 verify {
- ensure!(budget_remaining < BalanceOf::<T, I>::max_value(), "Budget not used");
 ensure!(missed_any == false, "Missed some");
- assert_last_event::<T, I>(Event::BountyBecameActive { index: b - 1 }.into())
+ if b > 0 {
+ ensure!(budget_remaining < BalanceOf::<T, I>::max_value(), "Budget not used");
+ assert_last_event::<T, I>(Event::BountyBecameActive { index: b - 1 }.into())
+ } else {
+ ensure!(budget_remaining == BalanceOf::<T, I>::max_value(), "Budget used");
+ }
 }

 impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test)
diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs
index 7c444dbaa7b6a..fcebacf5762e7 100644
--- a/frame/collective/src/benchmarking.rs
+++ b/frame/collective/src/benchmarking.rs
@@ -36,68 +36,69 @@ fn assert_last_event<T: Config<I>, I: 'static>(generic_event: <T as Config<I>>::
 benchmarks_instance_pallet! {
 set_members {
- let m in 1 .. T::MaxMembers::get();
- let n in 1 .. T::MaxMembers::get();
- let p in 1 .. T::MaxProposals::get();
+ let m in 0 .. T::MaxMembers::get();
+ let n in 0 .. T::MaxMembers::get();
+ let p in 0 .. T::MaxProposals::get();

 // Set old members.
 // We compute the difference of old and new members, so it should influence timing.
 let mut old_members = vec![];
- let mut last_old_member = account::<T::AccountId>("old member", 0, SEED);
 for i in 0 ..
m {
- last_old_member = account::<T::AccountId>("old member", i, SEED);
- old_members.push(last_old_member.clone());
+ let old_member = account::<T::AccountId>("old member", i, SEED);
+ old_members.push(old_member);
 }
 let old_members_count = old_members.len() as u32;

 Collective::<T, I>::set_members(
 SystemOrigin::Root.into(),
 old_members.clone(),
- Some(last_old_member.clone()),
+ old_members.last().cloned(),
 T::MaxMembers::get(),
 )?;

- // Set a high threshold for proposals passing so that they stay around.
- let threshold = m.max(2);
- // Length of the proposals should be irrelevant to `set_members`.
- let length = 100;
- for i in 0 .. p {
- // Proposals should be different so that different proposal hashes are generated
- let proposal: T::Proposal = SystemCall::<T>::remark {
- remark: vec![i as u8; length]
- }.into();
- Collective::<T, I>::propose(
- SystemOrigin::Signed(last_old_member.clone()).into(),
- threshold,
- Box::new(proposal.clone()),
- MAX_BYTES,
- )?;
- let hash = T::Hashing::hash_of(&proposal);
- // Vote on the proposal to increase state relevant for `set_members`.
- // Not voting for `last_old_member` because they proposed and not voting for the first member
- // to keep the proposal from passing.
- for j in 2 .. m - 1 {
- let voter = &old_members[j as usize];
- let approve = true;
- Collective::<T, I>::vote(
- SystemOrigin::Signed(voter.clone()).into(),
- hash,
- i,
- approve,
+ // If there were any old members generate a bunch of proposals.
+ if m > 0 {
+ // Set a high threshold for proposals passing so that they stay around.
+ let threshold = m.max(2);
+ // Length of the proposals should be irrelevant to `set_members`.
+ let length = 100;
+ for i in 0 .. p {
+ // Proposals should be different so that different proposal hashes are generated
+ let proposal: T::Proposal = SystemCall::<T>::remark {
+ remark: vec![i as u8; length]
+ }.into();
+ Collective::<T, I>::propose(
+ SystemOrigin::Signed(old_members.last().unwrap().clone()).into(),
+ threshold,
+ Box::new(proposal.clone()),
+ MAX_BYTES,
 )?;
+ let hash = T::Hashing::hash_of(&proposal);
+ // Vote on the proposal to increase state relevant for `set_members`.
+ // Not voting for last old member because they proposed and not voting for the first member
+ // to keep the proposal from passing.
+ for j in 2 .. m - 1 {
+ let voter = &old_members[j as usize];
+ let approve = true;
+ Collective::<T, I>::vote(
+ SystemOrigin::Signed(voter.clone()).into(),
+ hash,
+ i,
+ approve,
+ )?;
+ }
 }
 }

 // Construct `new_members`.
 // It should influence timing since it will sort this vector.
 let mut new_members = vec![];
- let mut last_member = account::<T::AccountId>("member", 0, SEED);
 for i in 0 .. n {
- last_member = account::<T::AccountId>("member", i, SEED);
- new_members.push(last_member.clone());
+ let member = account::<T::AccountId>("member", i, SEED);
+ new_members.push(member);
 }
- }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), T::MaxMembers::get())
+ }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get())
 verify {
 new_members.sort();
 assert_eq!(Collective::<T, I>::members(), new_members);
diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs
index 22d00a912a4f7..06ac8d7c60162 100644
--- a/frame/elections-phragmen/src/benchmarking.rs
+++ b/frame/elections-phragmen/src/benchmarking.rs
@@ -362,7 +362,7 @@ benchmarks! {
 // total number of voters.
 let v in (T::MaxVoters::get() / 2) .. T::MaxVoters::get();
 // those that are defunct and need removal.
- let d in 1 .. (T::MaxVoters::get() / 2);
+ let d in 0 ..
(T::MaxVoters::get() / 2);

 // remove any previous stuff.
 clean::<T>();

diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs
index 93e14f358208e..4d1659af46460 100644
--- a/frame/examples/basic/src/benchmarking.rs
+++ b/frame/examples/basic/src/benchmarking.rs
@@ -34,22 +34,22 @@ use frame_system::RawOrigin;
 // Details on using the benchmarks macro can be seen at:
 // https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks
 benchmarks! {
- // This will measure the execution time of `set_dummy` for b in [1..1000] range.
+ // This will measure the execution time of `set_dummy` for b in [0..1000] range.
 set_dummy_benchmark {
 // This is the benchmark setup phase
- let b in 1 .. 1000;
+ let b in 0 .. 1000;
 }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call
 verify {
 // This is the optional benchmark verification phase, asserting certain states.
 assert_eq!(Pallet::<T>::dummy(), Some(b.into()))
 }

- // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range.
+ // This will measure the execution time of `accumulate_dummy` for b in [0..1000] range.
 // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same
 // as the extrinsic call. `_(...)` is used to represent the extrinsic name.
 // The benchmark verification phase is omitted.
 accumulate_dummy {
- let b in 1 .. 1000;
+ let b in 0 .. 1000;
 // The caller account is whitelisted for DB reads/write by the benchmarking macro.
 let caller: T::AccountId = whitelisted_caller();
 }: _(RawOrigin::Signed(caller), b.into())
diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs
index 1767a8da4def0..92ebf81854f23 100644
--- a/frame/gilt/src/benchmarking.rs
+++ b/frame/gilt/src/benchmarking.rs
@@ -97,7 +97,7 @@ benchmarks! {

 pursue_target_per_item {
 // bids taken
- let b in 1..T::MaxQueueLen::get();
+ let b in 0..T::MaxQueueLen::get();

 let caller: T::AccountId = whitelisted_caller();
 T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::<T>::from(b + 1));
@@ -113,7 +113,7 @@ benchmarks! {

 pursue_target_per_queue {
 // total queues hit
- let q in 1..T::QueueCount::get();
+ let q in 0..T::QueueCount::get();

 let caller: T::AccountId = whitelisted_caller();
 T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::<T>::from(q + 1));
diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs
index c66658b92b0d2..c628387a4d22e 100644
--- a/frame/identity/src/benchmarking.rs
+++ b/frame/identity/src/benchmarking.rs
@@ -130,7 +130,7 @@ benchmarks! {
 set_identity {
 let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
- let x in 1 .. T::MaxAdditionalFields::get();
+ let x in 0 .. T::MaxAdditionalFields::get();
 let caller = {
 // The target user
 let caller: T::AccountId = whitelisted_caller();
@@ -166,7 +166,7 @@ benchmarks! {
 set_subs_new {
 let caller: T::AccountId = whitelisted_caller();
 // Create a new subs vec with s sub accounts
- let s in 1 .. T::MaxSubAccounts::get() => ();
+ let s in 0 .. T::MaxSubAccounts::get() => ();
 let subs = create_sub_accounts::<T>(&caller, s)?;
 ensure!(SubsOf::<T>::get(&caller).1.len() == 0, "Caller already has subs");
 }: set_subs(RawOrigin::Signed(caller.clone()), subs)
@@ -177,7 +177,7 @@ benchmarks! {
 set_subs_old {
 let caller: T::AccountId = whitelisted_caller();
 // Give them p many previous sub accounts.
- let p in 1 ..
T::MaxSubAccounts::get() => {
+ let p in 0 .. T::MaxSubAccounts::get() => {
 let _ = add_sub_accounts::<T>(&caller, p)?;
 };
 // Remove all subs.
@@ -198,12 +198,12 @@ benchmarks! {
 let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());

 let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
- let s in 1 .. T::MaxSubAccounts::get() => {
+ let s in 0 .. T::MaxSubAccounts::get() => {
 // Give them s many sub accounts
 let caller: T::AccountId = whitelisted_caller();
 let _ = add_sub_accounts::<T>(&caller, s)?;
 };
- let x in 1 .. T::MaxAdditionalFields::get();
+ let x in 0 .. T::MaxAdditionalFields::get();

 // Create their main identity with x additional fields
 let info = create_identity_info::<T>(x);
@@ -233,7 +233,7 @@ benchmarks! {
 let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());

 let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
- let x in 1 .. T::MaxAdditionalFields::get() => {
+ let x in 0 .. T::MaxAdditionalFields::get() => {
 // Create their main identity with x additional fields
 let info = create_identity_info::<T>(x);
 let caller: T::AccountId = whitelisted_caller();
@@ -251,7 +251,7 @@ benchmarks! {
 let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());

 let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
- let x in 1 .. T::MaxAdditionalFields::get() => {
+ let x in 0 .. T::MaxAdditionalFields::get() => {
 // Create their main identity with x additional fields
 let info = create_identity_info::<T>(x);
 let caller: T::AccountId = whitelisted_caller();
@@ -332,7 +332,7 @@ benchmarks! {
 let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());

 let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::<T>(r)?;
- let x in 1 .. T::MaxAdditionalFields::get();
+ let x in 0 .. T::MaxAdditionalFields::get();

 let info = create_identity_info::<T>(x);
 let info_hash = T::Hashing::hash_of(&info);
@@ -348,8 +348,8 @@ benchmarks! {

 kill_identity {
 let r in 1 .. T::MaxRegistrars::get() => add_registrars::<T>(r)?;
- let s in 1 .. T::MaxSubAccounts::get();
- let x in 1 .. T::MaxAdditionalFields::get();
+ let s in 0 .. T::MaxSubAccounts::get();
+ let x in 0 .. T::MaxAdditionalFields::get();

 let target: T::AccountId = account("target", 0, SEED);
 let target_origin: <T as frame_system::Config>::RuntimeOrigin = RawOrigin::Signed(target.clone()).into();
@@ -379,7 +379,7 @@ benchmarks! {
 }

 add_sub {
- let s in 1 .. T::MaxSubAccounts::get() - 1;
+ let s in 0 .. T::MaxSubAccounts::get() - 1;

 let caller: T::AccountId = whitelisted_caller();
 let _ = add_sub_accounts::<T>(&caller, s)?;
@@ -415,7 +415,7 @@ benchmarks! {
 }

 quit_sub {
- let s in 1 .. T::MaxSubAccounts::get() - 1;
+ let s in 0 .. T::MaxSubAccounts::get() - 1;

 let caller: T::AccountId = whitelisted_caller();
 let sup = account("super", 0, SEED);
diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs
index a4d074450e836..eb629b330abb2 100644
--- a/frame/ranked-collective/src/benchmarking.rs
+++ b/frame/ranked-collective/src/benchmarking.rs
@@ -138,7 +138,7 @@ benchmarks_instance_pallet! {
 }

 cleanup_poll {
- let n in 1 .. 100;
+ let n in 0 ..
100;

 // Create a poll
 let class = T::Polls::classes().into_iter().next().unwrap();
diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs
index c7e6936ac75d8..dcb861e2ce419 100644
--- a/frame/staking/src/benchmarking.rs
+++ b/frame/staking/src/benchmarking.rs
@@ -88,6 +88,7 @@ pub fn create_validator_with_nominators<T: Config>(
 points_total += 10;
 points_individual.push((v_stash.clone(), 10));

+ let original_nominator_count = Nominators::<T>::count();
 let mut nominators = Vec::new();

 // Give the validator n nominators, but keep total users in the system the same.
@@ -114,7 +115,7 @@ pub fn create_validator_with_nominators<T: Config>(
 assert_eq!(new_validators.len(), 1);
 assert_eq!(new_validators[0], v_stash, "Our validator was not selected!");
 assert_ne!(Validators::<T>::count(), 0);
- assert_ne!(Nominators::<T>::count(), 0);
+ assert_eq!(Nominators::<T>::count(), original_nominator_count + nominators.len() as u32);

 // Give Era Points
 let reward = EraRewardPoints::<T::AccountId> {
@@ -544,7 +545,7 @@ benchmarks! {
 }

 payout_stakers_dead_controller {
- let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32;
+ let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32;
 let (validator, nominators) = create_validator_with_nominators::<T>(
 n,
 T::MaxNominatorRewardedPerValidator::get() as u32,
@@ -577,7 +578,7 @@ benchmarks! {
 }

 payout_stakers_alive_staked {
- let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32;
+ let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32;
 let (validator, nominators) = create_validator_with_nominators::<T>(
 n,
 T::MaxNominatorRewardedPerValidator::get() as u32,
@@ -695,7 +696,7 @@ benchmarks! {
 new_era {
 let v in 1 .. 10;
- let n in 1 .. 100;
+ let n in 0 .. 100;

 create_validators_with_nominators_for_era::<T>(
 v,
@@ -714,7 +715,7 @@ benchmarks! {
 #[extra]
 payout_all {
 let v in 1 .. 10;
- let n in 1 .. 100;
+ let n in 0 .. 100;
 create_validators_with_nominators_for_era::<T>(
 v, n,
diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs
index dbe38da5775a7..0f7603fe1dd9f 100644
--- a/frame/system/benchmarking/src/lib.rs
+++ b/frame/system/benchmarking/src/lib.rs
@@ -64,7 +64,7 @@ benchmarks! {

 #[skip_meta]
 set_storage {
- let i in 1 .. 1000;
+ let i in 0 .. 1000;

 // Set up i items to add
 let mut items = Vec::new();
@@ -72,56 +72,69 @@ benchmarks! {
 let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec();
 items.push((hash.clone(), hash.clone()));
 }
+
+ let items_to_verify = items.clone();
 }: _(RawOrigin::Root, items)
 verify {
- let last_hash = (i, i - 1).using_encoded(T::Hashing::hash);
- let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?;
- assert_eq!(value, last_hash.as_ref().to_vec());
+ // Verify that they're actually in the storage.
+ for (item, _) in items_to_verify {
+ let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?;
+ assert_eq!(value, *item);
+ }
 }

 #[skip_meta]
 kill_storage {
- let i in 1 .. 1000;
+ let i in 0 .. 1000;

 // Add i items to storage
- let mut items = Vec::new();
+ let mut items = Vec::with_capacity(i as usize);
 for j in 0 .. i {
 let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec();
 storage::unhashed::put_raw(&hash, &hash);
 items.push(hash);
 }

- // We will verify this value is removed
- let last_hash = (i, i - 1).using_encoded(T::Hashing::hash);
- let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?;
- assert_eq!(value, last_hash.as_ref().to_vec());
+ // Verify that they're actually in the storage.
+ for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } + let items_to_verify = items.clone(); }: _(RawOrigin::Root, items) verify { - assert_eq!(storage::unhashed::get_raw(last_hash.as_ref()), None); + // Verify that they're not in the storage anymore. + for item in items_to_verify { + assert!(storage::unhashed::get_raw(&item).is_none()); + } } #[skip_meta] kill_prefix { - let p in 1 .. 1000; + let p in 0 .. 1000; let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); + let mut items = Vec::with_capacity(p as usize); // add p items that share a prefix for i in 0 .. p { let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); let key = [&prefix[..], &hash[..]].concat(); storage::unhashed::put_raw(&key, &key); + items.push(key); } - // We will verify this value is removed - let last_hash = (p, p - 1).using_encoded(T::Hashing::hash).as_ref().to_vec(); - let last_key = [&prefix[..], &last_hash[..]].concat(); - let value = storage::unhashed::get_raw(&last_key).ok_or("No value stored")?; - assert_eq!(value, last_key); - + // Verify that they're actually in the storage. + for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } }: _(RawOrigin::Root, prefix, p) verify { - assert_eq!(storage::unhashed::get_raw(&last_key), None); + // Verify that they're not in the storage anymore. + for item in items { + assert!(storage::unhashed::get_raw(&item).is_none()); + } } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); From 1a5cdc81c711bc3a7464f24ccd2e1ba20a5ea594 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 7 Oct 2022 17:19:10 +0300 Subject: [PATCH 52/75] BEEFY: Define a `BeefyVerify` trait for signatures (#12299) * Define CustomVerify trait Signed-off-by: Serban Iorga * Use ECDSA CustomVerify for MultiSignature Signed-off-by: Serban Iorga * beefy: small simplifications Signed-off-by: Serban Iorga * Revert "Use ECDSA CustomVerify for MultiSignature" This reverts commit 136cff82505662dd92c864491814629d2bc349f0. * Revert "Define CustomVerify trait" This reverts commit adf91e9e6d1bdea6f00831f6067b74c3d945f9a2. 
* Define BeefyAuthorityId and BeefyVerify traits * Improve BeefyVerify unit tests Co-authored-by: Robert Hambrock * fmt & import sp_core::blake2_256 * Renamings * remove SignerToAccountId * fix Signed-off-by: Serban Iorga Co-authored-by: Robert Hambrock --- Cargo.lock | 1 + client/beefy/src/keystore.rs | 9 ++--- frame/beefy-mmr/src/lib.rs | 8 +--- primitives/beefy/Cargo.toml | 2 + primitives/beefy/src/lib.rs | 75 ++++++++++++++++++++++++++++++++++- primitives/core/src/crypto.rs | 5 +++ 6 files changed, 87 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cb440433f204..49b3dd3cf957b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -538,6 +538,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", + "sp-io", "sp-keystore", "sp-mmr-primitives", "sp-runtime", diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs index b0259a42075ea..886c00fc5d817 100644 --- a/client/beefy/src/keystore.rs +++ b/client/beefy/src/keystore.rs @@ -19,12 +19,13 @@ use sp_application_crypto::RuntimeAppPublic; use sp_core::keccak_256; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::traits::Keccak256; use log::warn; use beefy_primitives::{ crypto::{Public, Signature}, - KEY_TYPE, + BeefyVerify, KEY_TYPE, }; use crate::error; @@ -98,11 +99,7 @@ impl BeefyKeystore { /// /// Return `true` if the signature is authentic, `false` otherwise. pub fn verify(public: &Public, sig: &Signature, message: &[u8]) -> bool { - let msg = keccak_256(message); - let sig = sig.as_ref(); - let public = public.as_ref(); - - sp_core::ecdsa::Pair::verify_prehashed(sig, &msg, public) + BeefyVerify::::verify(sig, message, public) } } diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 5b82c89ce84b6..0b7fc22cd279b 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -73,12 +73,8 @@ where /// Convert BEEFY secp256k1 public keys into Ethereum addresses pub struct BeefyEcdsaToEthereum; impl Convert> for BeefyEcdsaToEthereum { - fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { - sp_core::ecdsa::Public::try_from(a.as_ref()) - .map_err(|_| { - log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); - }) - .unwrap_or(sp_core::ecdsa::Public::from_raw([0u8; 33])) + fn convert(beefy_id: beefy_primitives::crypto::AuthorityId) -> Vec { + sp_core::ecdsa::Public::from(beefy_id) .to_eth_address() .map(|v| v.to_vec()) .map_err(|_| { diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index fe6ce23337c86..f85e2edf4f442 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -34,6 +35,7 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs 
index 705366e1b4778..453eb67315d4e 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -41,12 +41,30 @@ pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider}; use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; +use sp_application_crypto::RuntimeAppPublic; use sp_core::H256; +use sp_runtime::traits::Hash; use sp_std::prelude::*; /// Key type for BEEFY module. pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); +/// Trait representing a BEEFY authority ID. +pub trait BeefyAuthorityId: RuntimeAppPublic {} + +/// Means of verification for a BEEFY authority signature. +/// +/// Accepts a custom hashing function for the message and a custom converter for the signer. +pub trait BeefyVerify { + /// Type of the signer. + type Signer: BeefyAuthorityId; + + /// Verify a signature. + /// + /// Return `true` if the signature is valid for the value. + fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool; +} + /// BEEFY cryptographic types /// /// This module basically introduces three crypto types: @@ -60,7 +78,9 @@ pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::Ke /// The current underlying crypto scheme used is ECDSA. This can be changed, /// without affecting code restricted against the above listed crypto types. pub mod crypto { + use super::{BeefyAuthorityId, BeefyVerify, Hash}; use sp_application_crypto::{app_crypto, ecdsa}; + use sp_core::crypto::Wraps; app_crypto!(ecdsa, crate::KEY_TYPE); /// Identity of a BEEFY authority using ECDSA as its crypto. @@ -68,6 +88,26 @@ pub mod crypto { /// Signature for a BEEFY authority using ECDSA as its crypto. pub type AuthoritySignature = Signature; + + impl BeefyAuthorityId for AuthorityId {} + + impl BeefyVerify for AuthoritySignature + where + ::Output: Into<[u8; 32]>, + { + type Signer = AuthorityId; + + fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool { + let msg_hash = ::hash(msg).into(); + match sp_io::crypto::secp256k1_ecdsa_recover_compressed( + self.as_inner_ref().as_ref(), + &msg_hash, + ) { + Ok(raw_pubkey) => raw_pubkey.as_ref() == AsRef::<[u8]>::as_ref(signer), + _ => false, + } + } + } } /// The `ConsensusEngineId` of BEEFY. @@ -180,7 +220,8 @@ sp_api::decl_runtime_apis! { mod tests { use super::*; use sp_application_crypto::ecdsa::{self, Public}; - use sp_core::Pair; + use sp_core::{blake2_256, crypto::Wraps, keccak_256, Pair}; + use sp_runtime::traits::{BlakeTwo256, Keccak256}; #[test] fn validator_set() { @@ -194,4 +235,36 @@ mod tests { assert_eq!(validators.id(), set_id); assert_eq!(validators.validators(), &vec![alice.public()]); } + + #[test] + fn beefy_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = crypto::Pair::generate(); + + let keccak_256_signature: crypto::Signature = + pair.as_inner_ref().sign_prehashed(&keccak_256(msg)).into(); + + let blake2_256_signature: crypto::Signature = + pair.as_inner_ref().sign_prehashed(&blake2_256(msg)).into(); + + // Verification works if the same hashing function is used when signing and verifying. + assert!(BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); + assert!(BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); + // Verification fails if distinct hashing functions are used when signing and verifying.
+ assert!(!BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); + assert!(!BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); + + // Other public key doesn't work + let (other_pair, _) = crypto::Pair::generate(); + assert!(!BeefyVerify::::verify( + &keccak_256_signature, + msg, + &other_pair.public() + )); + assert!(!BeefyVerify::::verify( + &blake2_256_signature, + msg, + &other_pair.public() + )); + } } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index db855620a8f0d..06703acea7202 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -984,6 +984,11 @@ pub trait IsWrappedBy: From + Into { pub trait Wraps: Sized { /// The inner type it is wrapping. type Inner: IsWrappedBy; + + /// Get a reference to the inner type that is wrapped. + fn as_inner_ref(&self) -> &Self::Inner { + Self::Inner::from_ref(self) + } } impl IsWrappedBy for T From b7c05622d0912816ac055fe086769f1e2fce575d Mon Sep 17 00:00:00 2001 From: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Date: Sat, 8 Oct 2022 13:26:31 -0600 Subject: [PATCH 53/75] fix comment math (#12452) --- frame/ranked-collective/src/lib.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index fa3a473fe7d73..111c5f70efdfa 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -199,11 +199,11 @@ impl Convert for Unit { /// Vote-weight scheme where all voters get one vote plus an additional vote for every excess rank /// they have. I.e.: /// -/// - Each member with no excess rank gets 1 vote; +/// - Each member with an excess rank of 0 gets 1 vote; /// - ...with an excess rank of 1 gets 2 votes; -/// - ...with an excess rank of 2 gets 2 votes; -/// - ...with an excess rank of 3 gets 3 votes; -/// - ...with an excess rank of 4 gets 4 votes. +/// - ...with an excess rank of 2 gets 3 votes; +/// - ...with an excess rank of 3 gets 4 votes; +/// - ...with an excess rank of 4 gets 5 votes. pub struct Linear; impl Convert for Linear { fn convert(r: Rank) -> Votes { @@ -214,11 +214,11 @@ impl Convert for Linear { /// Vote-weight scheme where all voters get one vote plus additional votes for every excess rank /// they have incrementing by one vote for each excess rank. I.e.: /// -/// - Each member with no excess rank gets 1 vote; -/// - ...with an excess rank of 1 gets 2 votes; -/// - ...with an excess rank of 2 gets 3 votes; -/// - ...with an excess rank of 3 gets 6 votes; -/// - ...with an excess rank of 4 gets 10 votes. +/// - Each member with an excess rank of 0 gets 1 vote; +/// - ...with an excess rank of 1 gets 3 votes; +/// - ...with an excess rank of 2 gets 6 votes; +/// - ...with an excess rank of 3 gets 10 votes; +/// - ...with an excess rank of 4 gets 15 votes. 
pub struct Geometric; impl Convert for Geometric { fn convert(r: Rank) -> Votes { From 460fff965a2fd25d3a75ef436ec265b4c2af47a0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 8 Oct 2022 23:15:25 +0200 Subject: [PATCH 54/75] Remove "to_block" field from BlockRequests (#12447) * Remove "to_block" field from BlockRequests * Maybe fix the tests --- client/network/common/src/sync/message.rs | 2 -- client/network/sync/src/lib.rs | 9 +-------- client/network/sync/src/schema/api.v1.proto | 2 -- client/network/sync/src/warp.rs | 1 - 4 files changed, 1 insertion(+), 13 deletions(-) diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index 27ab2704e6471..db9f747108c9f 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -158,8 +158,6 @@ pub mod generic { pub fields: BlockAttributes, /// Start from this block. pub from: FromBlock, - /// End at this block. An implementation defined maximum is used when unspecified. - pub to: Option, /// Sequence direction. pub direction: Direction, /// Maximum number of blocks to return. An implementation defined maximum is used when diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 6ad4a8fbbdcc5..6348490351140 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -650,7 +650,6 @@ where id: 0, fields: BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(request.0), - to: None, direction: Direction::Ascending, max: Some(1), }; @@ -1608,7 +1607,6 @@ where FromBlock::Number(n) => Some(schema::v1::block_request::FromBlock::Number(n.encode())), }, - to_block: request.to.map(|h| h.encode()).unwrap_or_default(), direction: request.direction as i32, max_blocks: request.max.unwrap_or(0), support_multiple_justifications: true, @@ -2252,7 +2250,6 @@ fn ancestry_request(block: NumberFor) -> BlockRequest { id: 0, fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, from: FromBlock::Number(block), - to: None, direction: Direction::Ascending, max: Some(1), } @@ -2368,7 +2365,6 @@ fn peer_block_request( id: 0, fields: attrs, from, - to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2402,7 +2398,6 @@ fn peer_gap_block_request( id: 0, fields: attrs, from, - to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2452,7 +2447,6 @@ fn fork_sync_request( id: 0, fields: attributes, from: FromBlock::Hash(*hash), - to: None, direction: Direction::Descending, max: Some(count), }, @@ -2702,8 +2696,7 @@ mod test { assert!(sync.justification_requests().any(|(p, r)| { p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && - r.from == FromBlock::Hash(b1_hash) && - r.to == None + r.from == FromBlock::Hash(b1_hash) })); assert_eq!( diff --git a/client/network/sync/src/schema/api.v1.proto b/client/network/sync/src/schema/api.v1.proto index b51137d1d51d4..203b157470a58 100644 --- a/client/network/sync/src/schema/api.v1.proto +++ b/client/network/sync/src/schema/api.v1.proto @@ -23,8 +23,6 @@ message BlockRequest { // Start with given block number. bytes number = 3; } - // End at this block. An implementation defined maximum is used when unspecified. - bytes to_block = 4; // optional // Sequence direction. Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. 
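To make the new request shape concrete, here is a minimal, self-contained sketch using stand-in types (the real `BlockRequest` in `sc_network_common::sync::message` is generic over the block type and uses `FromBlock` for the start of the range): the extent of a range is now implied entirely by `from`, `direction`, and `max`, with no explicit end block.

#[allow(dead_code)] // stand-in type for illustration; not the real definition
#[derive(Debug)]
enum Direction {
    Ascending,
    Descending,
}

#[derive(Debug)]
struct BlockRequest {
    from: u64,            // start block number (stands in for `FromBlock`)
    direction: Direction, // walk forwards or backwards from `from`
    max: Option<u32>,     // the responder stops after this many blocks
}

fn main() {
    // Request up to 128 blocks walking backwards from block 1_000. The end of
    // the range is implied by `max`; there is no longer a `to_block` field.
    let request = BlockRequest { from: 1_000, direction: Direction::Descending, max: Some(128) };
    println!("{:?}", request);
}

Since responders already applied an implementation-defined maximum when `to_block` was unspecified, `direction` plus `max` can arguably express the same requests without the redundant end marker.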
diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index 4f2a71d98613e..ab8a7c66b9856 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -193,7 +193,6 @@ where fields: BlockAttributes::HEADER | BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(header.hash()), - to: Some(header.hash()), direction: Direction::Ascending, max: Some(1), }; From 73c4f94ce0cb41e35bd7fbc7095590b98a351dbb Mon Sep 17 00:00:00 2001 From: Leszek Wiesner Date: Sun, 9 Oct 2022 11:22:43 +0200 Subject: [PATCH 55/75] Vesting pallet - make WithdrawReasons configurable (#12109) * Vesting pallet - make WithdrawReasons configurable * Update `pallet-vesting` README Co-authored-by: parity-processbot <> --- bin/node/runtime/src/lib.rs | 5 ++++- frame/vesting/README.md | 3 ++- frame/vesting/src/lib.rs | 13 ++++++++++--- frame/vesting/src/mock.rs | 5 ++++- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 34f6988c31643..142173621036d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -34,7 +34,7 @@ use frame_support::{ traits::{ AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, @@ -1374,6 +1374,8 @@ impl pallet_society::Config for Runtime { parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); } impl pallet_vesting::Config for Runtime { @@ -1382,6 +1384,7 @@ impl pallet_vesting::Config for Runtime { type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = pallet_vesting::weights::SubstrateWeight; + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; // `VestingInfo` encode length is 36 bytes. 28 schedules get encoded as 1009 bytes, which is the // highest number of schedules that encodes less than 2^10. const MAX_VESTING_SCHEDULES: u32 = 28; diff --git a/frame/vesting/README.md b/frame/vesting/README.md index c3800eb994d4d..b19a60c5b6824 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -7,7 +7,8 @@ A simple module providing a means of placing a linear curve on an account's locked balance. This module ensures that there is a lock in place preventing the balance to drop below the *unvested* -amount for any reason other than transaction fee payment. +amount for any reason other than the ones specified in the `UnvestedFundsAllowedWithdrawReasons` +configuration value. As the amount vested increases over time, the amount unvested reduces. However, locks remain in place and explicit action is needed on behalf of the user to ensure that the amount locked is diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 1ca8d41f9a41c..a92f94baf6cf9 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -24,7 +24,8 @@ //! //! A simple pallet providing a means of placing a linear curve on an account's locked balance. This //! pallet ensures that there is a lock in place preventing the balance to drop below the *unvested* -//! amount for any reason other than transaction fee payment. +//! amount for any reason other than the ones specified in the `UnvestedFundsAllowedWithdrawReasons` +//! configuration value. //! //! As the amount vested increases over time, the amount unvested reduces. However, locks remain in //! place and explicit action is needed on behalf of the user to ensure that the amount locked is @@ -170,6 +171,10 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + /// Reasons that determine under which conditions the balance may drop below + /// the unvested amount. + type UnvestedFundsAllowedWithdrawReasons: Get; + /// Maximum number of vesting schedules an account may have at a given moment. const MAX_VESTING_SCHEDULES: u32; } @@ -249,7 +254,9 @@ pub mod pallet { Vesting::::try_append(who, vesting_info) .expect("Too many vesting schedules at genesis."); - let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; + let reasons = + WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); + T::Currency::set_lock(VESTING_ID, who, locked, reasons); } } @@ -569,7 +576,7 @@ impl Pallet { T::Currency::remove_lock(VESTING_ID, who); Self::deposit_event(Event::::VestingCompleted { account: who.clone() }); } else { - let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; + let reasons = WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons); Self::deposit_event(Event::::VestingUpdated { account: who.clone(), diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index c4e520b37c8c8..0bd371a0353f1 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -17,7 +17,7 @@ use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64, GenesisBuild, WithdrawReasons}, }; use sp_core::H256; use sp_runtime::{ @@ -87,6 +87,8 @@ impl pallet_balances::Config for Test { } parameter_types!
{ pub const MinVestedTransfer: u64 = 256 * 2; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); pub static ExistentialDeposit: u64 = 0; } impl Config for Test { @@ -96,6 +98,7 @@ impl Config for Test { const MAX_VESTING_SCHEDULES: u32 = 3; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; } pub struct ExtBuilder { From 0c1ccdaa53556a106aa69c23f19527e435970237 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 10 Oct 2022 10:10:53 +0300 Subject: [PATCH 56/75] Move block announcement protocol config out of `Protocol` (#12441) * Move Role(s) to `sc-network-common` * Introduce `NotificationHandshake` type * Move block announce protocol config creation to `ChainSync` * Include block announcement into `notification_protocols` * Apply review comments * Remove unneeded include * Add missing include * Apply review comments --- .../src/communication/gossip.rs | 2 +- .../src/communication/tests.rs | 5 +- client/finality-grandpa/src/lib.rs | 1 + client/network-gossip/src/bridge.rs | 2 +- client/network-gossip/src/state_machine.rs | 2 +- client/network-gossip/src/validator.rs | 2 +- client/network/Cargo.toml | 2 +- client/network/common/src/config.rs | 28 ++++ client/network/common/src/protocol.rs | 1 + client/network/common/src/protocol/event.rs | 24 +-- client/network/common/src/protocol/role.rs | 121 ++++++++++++++ client/network/common/src/sync/message.rs | 28 +++- client/network/src/behaviour.rs | 5 +- client/network/src/config.rs | 29 +--- client/network/src/lib.rs | 3 +- client/network/src/protocol.rs | 151 ++++-------------- client/network/src/protocol/message.rs | 60 +------ client/network/src/service.rs | 14 +- client/network/src/service/tests.rs | 110 ++++++++++++- client/network/sync/src/lib.rs | 73 ++++++++- client/network/test/src/lib.rs | 19 ++- client/network/transactions/src/lib.rs | 6 +- client/service/src/builder.rs | 14 ++ 23 files changed, 439 insertions(+), 263 deletions(-) create mode 100644 client/network/common/src/protocol/role.rs diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 95efedf7b23b7..1ba5e0da33c96 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -90,7 +90,7 @@ use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; use sc_network::{PeerId, ReputationChange}; -use sc_network_common::protocol::event::ObservedRole; +use sc_network_common::protocol::role::ObservedRole; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index b73f6cdecdd4f..eab7bb2df50cf 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -28,10 +28,7 @@ use parity_scale_codec::Encode; use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::{ - event::{Event as NetworkEvent, ObservedRole}, - 
ProtocolName, - }, + protocol::{event::Event as NetworkEvent, role::ObservedRole, ProtocolName}, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSender, NotificationSenderError, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index d5c05fea78aa2..a7326d57c2bf0 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -695,6 +695,7 @@ pub fn grandpa_peers_set_config( fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. max_notification_size: 1024 * 1024, + handshake: None, set_config: sc_network_common::config::SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 121fa6dc9a50d..5563b3be35e8d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -317,7 +317,7 @@ mod tests { use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::event::ObservedRole, + protocol::role::ObservedRole, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSender, NotificationSenderError, diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 30a2e9d1494be..600383cf5f70d 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,7 +22,7 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::protocol::{event::ObservedRole, ProtocolName}; +use sc_network_common::protocol::{role::ObservedRole, ProtocolName}; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 185c2cfeed2c7..77dcc3bdc8791 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use libp2p::PeerId; -use sc_network_common::protocol::event::ObservedRole; +use sc_network_common::protocol::role::ObservedRole; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. 
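For context on the new `handshake` field, here is a rough sketch with stand-in shapes (not the actual `sc_network_common::config` types; the protocol name below is shortened for illustration): `handshake: None`, as in the GRANDPA set config above, means the node falls back to its default handshake for the protocol, which is the single encoded `Roles` byte.

// Stand-in shapes for illustration; the real types are
// `sc_network_common::config::{NonDefaultSetConfig, NotificationHandshake}`.
#[allow(dead_code)]
struct NotificationHandshake(Vec<u8>);

#[allow(dead_code)]
struct NonDefaultSetConfig {
    notifications_protocol: String,
    max_notification_size: u64,
    // `None`: fall back to the default handshake (the encoded local `Roles`
    // byte); `Some`: carry protocol-specific handshake bytes instead.
    handshake: Option<NotificationHandshake>,
}

fn main() {
    // Mirrors the GRANDPA set config above: a 1 MiB notification limit and no
    // custom handshake.
    let grandpa_like = NonDefaultSetConfig {
        notifications_protocol: "/grandpa/1".into(), // shortened, illustrative name
        max_notification_size: 1024 * 1024,
        handshake: None,
    };
    assert!(grandpa_like.handshake.is_none());
}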
diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 08d0f28394af0..99e6c9e708e7f 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -57,7 +57,7 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3" -async-std = "1.11.0" +async-std = { version = "1.11.0", features = ["attributes"] } rand = "0.7.2" tempfile = "3.1.0" sc-network-light = { version = "0.10.0-dev", path = "./light" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index fb23cd0174922..e4a7f04c8d6e8 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -20,6 +20,7 @@ use crate::protocol; +use codec::Encode; use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; @@ -199,6 +200,30 @@ impl Default for SetConfig { } } +/// Custom handshake for the notification protocol +#[derive(Debug, Clone)] +pub struct NotificationHandshake(Vec); + +impl NotificationHandshake { + /// Create new `NotificationHandshake` from an object that implements `Encode` + pub fn new(handshake: H) -> Self { + Self(handshake.encode()) + } + + /// Create new `NotificationHandshake` from raw bytes + pub fn from_bytes(bytes: Vec) -> Self { + Self(bytes) + } +} + +impl std::ops::Deref for NotificationHandshake { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// Extension to [`SetConfig`] for sets that aren't the default set. /// /// > **Note**: As new fields might be added in the future, please consider using the `new` method @@ -218,6 +243,8 @@ pub struct NonDefaultSetConfig { /// If a fallback is used, it will be reported in /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` pub fallback_names: Vec, + /// Handshake of the protocol + pub handshake: Option, /// Maximum allowed size of single notifications. pub max_notification_size: u64, /// Base configuration. @@ -231,6 +258,7 @@ impl NonDefaultSetConfig { notifications_protocol, max_notification_size, fallback_names: Vec::new(), + handshake: None, set_config: SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs index 11edc373a2620..04bfaedbcac71 100644 --- a/client/network/common/src/protocol.rs +++ b/client/network/common/src/protocol.rs @@ -27,6 +27,7 @@ use std::{ use libp2p::core::upgrade; pub mod event; +pub mod role; /// The protocol name transmitted on the wire. #[derive(Debug, Clone)] diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index 3d8c183da495c..236913df1b120 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -20,6 +20,7 @@ //! events that happen on the network like DHT get/put results received. use super::ProtocolName; +use crate::protocol::role::ObservedRole; use bytes::Bytes; use libp2p::{core::PeerId, kad::record::Key}; @@ -97,26 +98,3 @@ pub enum Event { messages: Vec<(ProtocolName, Bytes)>, }, } - -/// Role that the peer sent to us during the handshake, with the addition of what our local node -/// knows about that peer. -/// -/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a -/// > node says about itself, while `ObservedRole` is a `Role` merged with the -/// > information known locally about that node. -#[derive(Debug, Clone)] -pub enum ObservedRole { - /// Full node. 
- Full, - /// Light node. - Light, - /// Third-party authority. - Authority, -} - -impl ObservedRole { - /// Returns `true` for `ObservedRole::Light`. - pub fn is_light(&self) -> bool { - matches!(self, Self::Light) - } -} diff --git a/client/network/common/src/protocol/role.rs b/client/network/common/src/protocol/role.rs new file mode 100644 index 0000000000000..ed22830fd7170 --- /dev/null +++ b/client/network/common/src/protocol/role.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::{self, Encode, EncodeLike, Input, Output}; + +/// Role that the peer sent to us during the handshake, with the addition of what our local node +/// knows about that peer. +/// +/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a +/// > node says about itself, while `ObservedRole` is a `Role` merged with the +/// > information known locally about that node. +#[derive(Debug, Clone)] +pub enum ObservedRole { + /// Full node. + Full, + /// Light node. + Light, + /// Third-party authority. + Authority, +} + +impl ObservedRole { + /// Returns `true` for `ObservedRole::Light`. + pub fn is_light(&self) -> bool { + matches!(self, Self::Light) + } +} + +/// Role of the local node. +#[derive(Debug, Clone)] +pub enum Role { + /// Regular full node. + Full, + /// Actual authority. + Authority, +} + +impl Role { + /// True for [`Role::Authority`]. + pub fn is_authority(&self) -> bool { + matches!(self, Self::Authority) + } +} + +impl std::fmt::Display for Role { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Full => write!(f, "FULL"), + Self::Authority => write!(f, "AUTHORITY"), + } + } +} + +bitflags::bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Act as an authority. + const AUTHORITY = 0b00000100; + } +} + +impl Roles { + /// Does this role represent a client that holds full chain data locally? + pub fn is_full(&self) -> bool { + self.intersects(Self::FULL | Self::AUTHORITY) + } + + /// Does this role represent a client that acts as an authority? + pub fn is_authority(&self) -> bool { + *self == Self::AUTHORITY + } + + /// Does this role represent a client that does not hold full chain data locally?
+ pub fn is_light(&self) -> bool { + !self.is_full() + } +} + +impl<'a> From<&'a Role> for Roles { + fn from(roles: &'a Role) -> Self { + match roles { + Role::Full => Self::FULL, + Role::Authority => Self::AUTHORITY, + } + } +} + +impl Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } +} + +impl EncodeLike for Roles {} + +impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } +} diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index db9f747108c9f..346f1dbce9bcc 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -19,10 +19,12 @@ //! Network packet message types. These get serialized and put into the lower level protocol //! payload. +use crate::protocol::role::Roles; + use bitflags::bitflags; use codec::{Decode, Encode, Error, Input, Output}; pub use generic::{BlockAnnounce, FromBlock}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; /// Type alias for using the block request type using block type parameters. pub type BlockRequest = @@ -218,3 +220,27 @@ pub mod generic { } } } + +/// Handshake sent when we open a block announces substream. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct BlockAnnouncesHandshake { + /// Roles of the node. + pub roles: Roles, + /// Best block number. + pub best_number: NumberFor, + /// Best block hash. + pub best_hash: B::Hash, + /// Genesis block hash. + pub genesis_hash: B::Hash, +} + +impl BlockAnnouncesHandshake { + pub fn build( + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> Self { + Self { genesis_hash, roles, best_number, best_hash } + } +} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 14962c837aa10..b31f36eb46692 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -19,7 +19,7 @@ use crate::{ discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, - protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, + protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, request_responses, }; @@ -41,7 +41,8 @@ use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ config::ProtocolId, protocol::{ - event::{DhtEvent, ObservedRole}, + event::DhtEvent, + role::{ObservedRole, Roles}, ProtocolName, }, request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, diff --git a/client/network/src/config.rs b/client/network/src/config.rs index b2adfa81d065b..db3e8f0b98a33 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,6 +23,7 @@ pub use sc_network_common::{ config::ProtocolId, + protocol::role::Role, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -93,6 +94,9 @@ where /// Registry for recording prometheus metrics to. pub metrics_registry: Option, + /// Block announce protocol configuration + pub block_announce_config: NonDefaultSetConfig, + /// Request response configuration for the block request protocol. /// /// [`RequestResponseConfig::name`] is used to tag outgoing block requests with the correct @@ -130,31 +134,6 @@ where pub request_response_protocol_configs: Vec, } -/// Role of the local node. 
-#[derive(Debug, Clone)] -pub enum Role { - /// Regular full node. - Full, - /// Actual authority. - Authority, -} - -impl Role { - /// True for [`Role::Authority`]. - pub fn is_authority(&self) -> bool { - matches!(self, Self::Authority { .. }) - } -} - -impl fmt::Display for Role { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Full => write!(f, "FULL"), - Self::Authority { .. } => write!(f, "AUTHORITY"), - } - } -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index d17f47328b804..27f2a938154fe 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -260,7 +260,8 @@ pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::PeerInfo; pub use sc_network_common::{ protocol::{ - event::{DhtEvent, Event, ObservedRole}, + event::{DhtEvent, Event}, + role::ObservedRole, ProtocolName, }, request_responses::{IfDisconnected, RequestFailure}, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index fbf651de9d49a..325e044527efa 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -31,10 +31,7 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, log, trace, warn, Level}; -use message::{ - generic::{Message as GenericMessage, Roles}, - Message, -}; +use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; @@ -42,13 +39,14 @@ use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, }; use sc_network_common::{ - config::{NonReservedPeerMode, ProtocolId}, + config::NonReservedPeerMode, error, - protocol::ProtocolName, + protocol::{role::Roles, ProtocolName}, request_responses::RequestFailure, sync::{ message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, + BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, + BlockResponse, BlockState, }, warp::{EncodedProof, WarpProofRequest}, BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, @@ -85,8 +83,6 @@ const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximum allowed size for a block announce. -const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. @@ -235,30 +231,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -/// Handshake sent when we open a block announces substream. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -struct BlockAnnouncesHandshake { - /// Roles of the node. - roles: Roles, - /// Best block number. - best_number: NumberFor, - /// Best block hash. - best_hash: B::Hash, - /// Genesis block hash. 
- genesis_hash: B::Hash, -} - -impl BlockAnnouncesHandshake { - fn build( - roles: Roles, - best_number: NumberFor, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> Self { - Self { genesis_hash, roles, best_number, best_hash } - } -} - impl Protocol where B: BlockT, @@ -268,12 +240,10 @@ where pub fn new( roles: Roles, chain: Arc, - protocol_id: ProtocolId, - fork_id: &Option, network_config: &config::NetworkConfiguration, - notifications_protocols_handshakes: Vec>, metrics_registry: Option<&Registry>, chain_sync: Box>, + block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); @@ -365,51 +335,24 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; - let block_announces_protocol = { - let genesis_hash = - chain.hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!( - "/{}/{}/block-announces/1", - array_bytes::bytes2hex("", genesis_hash), - fork_id - ) - } else { - format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) - } - }; - - let legacy_ba_protocol_name = format!("/{}/block-announces/1", protocol_id.as_ref()); - let behaviour = { - let best_number = info.best_number; - let best_hash = info.best_hash; - let genesis_hash = info.genesis_hash; - - let block_announces_handshake = - BlockAnnouncesHandshake::::build(roles, best_number, best_hash, genesis_hash) - .encode(); - - let sync_protocol_config = notifications::ProtocolConfig { - name: block_announces_protocol.into(), - fallback_names: iter::once(legacy_ba_protocol_name.into()).collect(), - handshake: block_announces_handshake, - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - }; - Notifications::new( peerset, - iter::once(sync_protocol_config).chain( - network_config.extra_sets.iter().zip(notifications_protocols_handshakes).map( - |(s, hs)| notifications::ProtocolConfig { - name: s.notifications_protocol.clone(), - fallback_names: s.fallback_names.clone(), - handshake: hs, - max_notification_size: s.max_notification_size, - }, - ), - ), + // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. 
+ // This protocol must be the first notification protocol given to + // `Notifications` + iter::once(notifications::ProtocolConfig { + name: block_announces_protocol.notifications_protocol.clone(), + fallback_names: block_announces_protocol.fallback_names.clone(), + handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(), + max_notification_size: block_announces_protocol.max_notification_size, + }) + .chain(network_config.extra_sets.iter().map(|s| notifications::ProtocolConfig { + name: s.notifications_protocol.clone(), + fallback_names: s.fallback_names.clone(), + handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()), + max_notification_size: s.max_notification_size, + })), ) }; @@ -437,10 +380,8 @@ where }, peerset_handle: peerset_handle.clone(), behaviour, - notification_protocols: network_config - .extra_sets - .iter() - .map(|s| s.notifications_protocol.clone()) + notification_protocols: iter::once(block_announces_protocol.notifications_protocol) + .chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone())) .collect(), bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { @@ -469,10 +410,7 @@ where pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - self.behaviour.disconnect_peer( - peer_id, - sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS), - ); + self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position)); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } @@ -1095,8 +1033,7 @@ where /// Sets the list of reserved peers for the given protocol/peerset. pub fn set_reserved_peerset_peers(&self, protocol: ProtocolName, peers: HashSet) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .set_reserved_peers(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peers); + self.peerset_handle.set_reserved_peers(sc_peerset::SetId::from(index), peers); } else { error!( target: "sub-libp2p", @@ -1109,10 +1046,7 @@ where /// Removes a `PeerId` from the list of reserved peers. pub fn remove_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_reserved_peer( - sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), - peer, - ); + self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1125,8 +1059,7 @@ where /// Adds a `PeerId` to the list of reserved peers. pub fn add_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1148,8 +1081,7 @@ where /// Add a peer to a peers set. 
pub fn add_to_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1162,10 +1094,7 @@ where /// Remove a peer from a peers set. pub fn remove_from_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_from_peers_set( - sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), - peer, - ); + self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1627,14 +1556,12 @@ where } } else { match ( - message::Roles::decode_all(&mut &received_handshake[..]), + Roles::decode_all(&mut &received_handshake[..]), self.peers.get(&peer_id), ) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, roles, notifications_sink, @@ -1646,9 +1573,7 @@ where // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, roles: peer.info.roles, notifications_sink, @@ -1672,9 +1597,7 @@ where } else { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), notifications_sink, } }, @@ -1699,9 +1622,7 @@ where } else { CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), } } }, @@ -1734,9 +1655,7 @@ where _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => CustomMessageOutcome::None, _ => { - let protocol_name = self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(); + let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { remote: peer_id, messages: vec![(protocol_name, message.freeze())], diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 3e1281753b82c..ef652387d2c7d 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -21,7 +21,7 @@ pub use self::generic::{ RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, RemoteHeaderRequest, - RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, Roles, + RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, }; use codec::{Decode, Encode}; use sc_client_api::StorageProof; @@ -57,11 +57,11 @@ pub struct RemoteReadResponse { /// Generic types. 
pub mod generic { use super::{RemoteCallResponse, RemoteReadResponse}; - use bitflags::bitflags; - use codec::{Decode, Encode, Input, Output}; + use codec::{Decode, Encode, Input}; use sc_client_api::StorageProof; use sc_network_common::{ message::RequestId, + protocol::role::Roles, sync::message::{ generic::{BlockRequest, BlockResponse}, BlockAnnounce, @@ -69,60 +69,6 @@ pub mod generic { }; use sp_runtime::ConsensusEngineId; - bitflags! { - /// Bitmask of the roles that a node fulfills. - pub struct Roles: u8 { - /// No network. - const NONE = 0b00000000; - /// Full node, does not participate in consensus. - const FULL = 0b00000001; - /// Light client node. - const LIGHT = 0b00000010; - /// Act as an authority - const AUTHORITY = 0b00000100; - } - } - - impl Roles { - /// Does this role represents a client that holds full chain data locally? - pub fn is_full(&self) -> bool { - self.intersects(Self::FULL | Self::AUTHORITY) - } - - /// Does this role represents a client that does not participates in the consensus? - pub fn is_authority(&self) -> bool { - *self == Self::AUTHORITY - } - - /// Does this role represents a client that does not hold full chain data locally? - pub fn is_light(&self) -> bool { - !self.is_full() - } - } - - impl<'a> From<&'a crate::config::Role> for Roles { - fn from(roles: &'a crate::config::Role) -> Self { - match roles { - crate::config::Role::Full => Self::FULL, - crate::config::Role::Authority { .. } => Self::AUTHORITY, - } - } - } - - impl codec::Encode for Roles { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } - } - - impl codec::EncodeLike for Roles {} - - impl codec::Decode for Roles { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) - } - } - /// Consensus is mostly opaque to us #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 180482e75ece2..28e479b702779 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -34,14 +34,10 @@ use crate::{ network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - protocol::{ - self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, - Ready, - }, + protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready}, transport, ReputationChange, }; -use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, @@ -222,19 +218,13 @@ where local_peer_id.to_base58(), ); - let default_notif_handshake_message = Roles::from(¶ms.role).encode(); - let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), params.chain.clone(), - params.protocol_id.clone(), - ¶ms.fork_id, ¶ms.network_config, - (0..params.network_config.extra_sets.len()) - .map(|_| default_notif_handshake_message.clone()) - .collect(), params.metrics_registry.as_ref(), params.chain_sync, + params.block_announce_config, )?; // List of multiaddresses that we know in the network. 
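As a sketch of what the handshake carried by `params.block_announce_config` contains, here is a simplified, non-generic stand-in for `BlockAnnouncesHandshake<B>` (concrete `u32` and `[u8; 32]` fields replace `NumberFor<B>` and `B::Hash`), assuming the `parity-scale-codec` crate is imported as `codec` with its `derive` feature, as is the convention in this repo:

use codec::Encode; // parity-scale-codec

// Simplified stand-in for `BlockAnnouncesHandshake<B>`, for illustration only.
#[derive(Encode)]
struct BlockAnnouncesHandshake {
    roles: u8,              // the single-byte `Roles` bitflags
    best_number: u32,       // stands in for `NumberFor<B>`
    best_hash: [u8; 32],    // stands in for `B::Hash`
    genesis_hash: [u8; 32], // stands in for `B::Hash`
}

fn main() {
    let handshake = BlockAnnouncesHandshake {
        roles: 0b0000_0001, // `Roles::FULL`
        best_number: 0,
        best_hash: [0u8; 32],
        genesis_hash: [0u8; 32],
    };
    // `NotificationHandshake::new` does essentially this: SCALE-encode the
    // value and keep the raw bytes for the notifications protocol.
    let raw: Vec<u8> = handshake.encode();
    assert_eq!(raw.len(), 1 + 4 + 32 + 32);
}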
diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index c8f137f79c6dc..7c651c675b83e 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -20,10 +20,15 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; +use sc_client_api::{BlockBackend, HeaderBackend}; use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, - protocol::event::Event, + config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, TransportConfig, + }, + protocol::{event::Event, role::Roles}, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, + sync::message::BlockAnnouncesHandshake, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -31,7 +36,7 @@ use sc_network_sync::{ ChainSync, }; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header as _}; +use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; @@ -132,7 +137,33 @@ fn build_test_full_node( None, ) .unwrap(); + + let block_announce_config = NonDefaultSetConfig { + notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), + fallback_names: vec![], + max_notification_size: 1024 * 1024, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< + substrate_test_runtime_client::runtime::Block, + >::build( + Roles::from(&config::Role::Full), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ))), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + }; + let worker = NetworkWorker::new(config::Params { + block_announce_config, role: config::Role::Full, executor: None, network_config, @@ -161,6 +192,7 @@ fn build_test_full_node( (service, event_stream) } +const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; const PROTOCOL_NAME: &str = "/foo"; /// Builds two nodes and their associated events stream. 
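The test helper above registers the block-announce protocol under a fixed name, but in production the name is derived from the genesis hash and an optional fork id, as `get_block_announce_proto_config` does later in this patch. A minimal sketch of that naming scheme, with a hand-rolled hex helper standing in for `array_bytes::bytes2hex`:

// Sketch of the block-announce protocol naming scheme.
fn block_announces_protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> String {
    // Stands in for `array_bytes::bytes2hex("", genesis_hash)`.
    let hex: String = genesis_hash.iter().map(|b| format!("{:02x}", b)).collect();
    match fork_id {
        Some(fork_id) => format!("/{}/{}/block-announces/1", hex, fork_id),
        None => format!("/{}/block-announces/1", hex),
    }
}

fn main() {
    assert_eq!(block_announces_protocol_name(&[0xab, 0xcd], None), "/abcd/block-announces/1");
    assert_eq!(
        block_announces_protocol_name(&[0xab, 0xcd], Some("fork")),
        "/abcd/fork/block-announces/1"
    );
}

Deriving the name from the genesis hash keeps nodes on different chains, or on different forks of the same chain, from even opening the block-announce substream with each other.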
@@ -178,6 +210,7 @@ fn build_nodes_one_proto() -> ( notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], @@ -190,6 +223,7 @@ fn build_nodes_one_proto() -> ( notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -368,6 +402,7 @@ fn lots_of_incoming_peers_works() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, }], transport: TransportConfig::MemoryOnly, @@ -387,6 +422,7 @@ fn lots_of_incoming_peers_works() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -504,6 +540,7 @@ fn fallback_name_working() { notifications_protocol: NEW_PROTOCOL_NAME.into(), fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], @@ -516,6 +553,7 @@ fn fallback_name_working() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -561,6 +599,72 @@ fn fallback_name_working() { }); } +// Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement +// protocol name and verify that `SyncDisconnected` event is emitted +#[async_std::test] +async fn disconnect_sync_peer_using_block_announcement_protocol_name() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: vec![], + max_notification_size: 1024 * 1024, + handshake: None, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + let (node2, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + handshake: None, + set_config: SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id(), + }], + ..Default::default() + }, + }], + listen_addresses: vec![], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {}, + }; + } + + loop { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamOpened { .. 
} => break, + _ => {}, + }; + } + + // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted + node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); + assert!(std::matches!( + events_stream2.next().await, + Some(Event::NotificationStreamClosed { .. }) + )); + let _ = events_stream2.next().await; // ignore the reopen event + + // now disconnect using the block announcement protocol, verify that `SyncDisconnected` is + // emitted + node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); + assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); +} + #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 6348490351140..280e530eca9a9 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -50,15 +50,21 @@ use log::{debug, error, info, trace, warn}; use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network_common::sync::{ - message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, - FromBlock, +use sc_network_common::{ + config::{ + NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + }, + protocol::role::Roles, + sync::{ + message::{ + BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, + BlockResponse, Direction, FromBlock, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, - BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, - OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, - PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, }; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; @@ -76,6 +82,7 @@ use sp_runtime::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, + iter, ops::Range, pin::Pin, sync::Arc, @@ -121,6 +128,9 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; +/// Maximum allowed size for a block announce. 
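+/// (1 MiB; advertised below as the protocol's `max_notification_size`.)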
+const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; + mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -2231,6 +2241,53 @@ where } None } + + /// Get config for the block announcement protocol + pub fn get_block_announce_proto_config( + &self, + protocol_id: ProtocolId, + fork_id: &Option, + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> NonDefaultSetConfig { + let block_announces_protocol = { + let genesis_hash = genesis_hash.as_ref(); + if let Some(ref fork_id) = fork_id { + format!( + "/{}/{}/block-announces/1", + array_bytes::bytes2hex("", genesis_hash), + fork_id + ) + } else { + format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) + } + }; + + NonDefaultSetConfig { + notifications_protocol: block_announces_protocol.into(), + fallback_names: iter::once( + format!("/{}/block-announces/1", protocol_id.as_ref()).into(), + ) + .collect(), + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + roles, + best_number, + best_hash, + genesis_hash, + ))), + // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement + // protocol is still hardcoded into the peerset. + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } } // This is purely during a backwards compatible transitionary period and should be removed diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 9d5abf98ceff0..a7c58631dc0f7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -55,7 +55,7 @@ use sc_network_common::{ config::{ MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, }, - protocol::ProtocolName, + protocol::{role::Roles, ProtocolName}, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; @@ -77,7 +77,7 @@ use sp_core::H256; use sp_runtime::{ codec::{Decode, Encode}, generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; use substrate_test_runtime_client::AccountKeyring; @@ -802,6 +802,7 @@ where notifications_protocol: p, fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }) .collect(); @@ -879,6 +880,19 @@ where Some(warp_sync), ) .unwrap(); + let block_announce_config = chain_sync.get_block_announce_proto_config( + protocol_id.clone(), + &fork_id, + Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); + let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -889,6 +903,7 @@ where import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, + block_announce_config, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index b75bd411b39c4..5239a94ef23f3 100644 --- 
a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -35,10 +35,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, error, - protocol::{ - event::{Event, ObservedRole}, - ProtocolName, - }, + protocol::{event::Event, role::ObservedRole, ProtocolName}, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, utils::{interval, LruHashSet}, ExHashT, @@ -145,6 +142,7 @@ impl TransactionsHandlerPrototype { notifications_protocol: self.protocol_name.clone(), fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, + handshake: None, set_config: SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index dfd532a14c172..4301e17a8c31e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -40,6 +40,7 @@ use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ + protocol::role::Roles, service::{NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; @@ -843,6 +844,18 @@ where config.network.max_parallel_downloads, warp_sync_provider, )?; + let block_announce_config = chain_sync.get_block_announce_proto_config( + protocol_id.clone(), + &config.chain_spec.fork_id().map(ToOwned::to_owned), + Roles::from(&config.role.clone()), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); request_response_protocol_configs.push(config.network.ipfs_server.then(|| { let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); @@ -865,6 +878,7 @@ where import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), + block_announce_config, block_request_protocol_config, state_request_protocol_config, warp_sync_protocol_config, From df81976c40e2e0573d59c51e12eb3c15c3ff3057 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 10 Oct 2022 16:25:11 +0200 Subject: [PATCH 57/75] Dont ignore errors in pallet benchmarking (#12449) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- .../benchmarking-cli/src/pallet/command.rs | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 72592617c52ac..daf4aa74e1394 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -297,16 +297,15 @@ impl PalletCmd { for (s, selected_components) in all_components.iter().enumerate() { // First we run a verification if !self.no_verify { - // Dont use these results since verification code will add overhead let state = &state_without_tracking; - let _results = StateMachine::new( + let result = StateMachine::new( state, &mut changes, &executor, "Benchmark_dispatch_benchmark", &( - &pallet.clone(), - &extrinsic.clone(), + &pallet, + &extrinsic, &selected_components.clone(), true, // run verification code 1, // no need to do internal repeats @@ -321,6 +320,20 @@ impl PalletCmd { .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; + // Dont use these 
results since verification code will add overhead.
+			let _batch =
+				<std::result::Result<Vec<BenchmarkBatch>, String> as Decode>::decode(
+					&mut &result[..],
+				)
+				.map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?
+				.map_err(|e| {
+					format!(
+						"Benchmark {}::{} failed: {}",
+						String::from_utf8_lossy(&pallet),
+						String::from_utf8_lossy(&extrinsic),
+						e
+					)
+				})?;
 			}
 			// Do one loop of DB tracking.
 			{

From 9672c362240d77d11f3bd56c701743f0ed86f84b Mon Sep 17 00:00:00 2001
From: Vlad
Date: Mon, 10 Oct 2022 21:04:51 +0300
Subject: [PATCH 58/75] Update UI tests for Rust 1.64 (#12440)

* Update UI tests for Rust 1.64

* Test with the staging image

* Switch back to production

---
 .../no_std_genesis_config.stderr              |  2 +-
 .../pallet_error_too_large.stderr             |  2 +-
 .../undefined_call_part.stderr                |  2 +-
 .../undefined_event_part.stderr               |  2 +-
 .../undefined_genesis_config_part.stderr      |  2 +-
 .../undefined_inherent_part.stderr            | 12 +++++------
 .../undefined_origin_part.stderr              |  8 +++++--
 .../undefined_validate_unsigned_part.stderr   | 21 ++++++++++++-------
 ...ed_keyword_two_times_integrity_test.stderr |  2 +-
 ...ved_keyword_two_times_on_initialize.stderr |  4 ++--
 .../tests/derive_no_bound_ui/debug.stderr     |  4 ++--
 .../call_argument_invalid_bound.stderr        |  2 +-
 .../call_argument_invalid_bound_2.stderr      | 16 +++++++-------
 .../call_argument_invalid_bound_3.stderr      |  2 +-
 .../pallet_ui/event_field_not_member.stderr   |  2 +-
 ...age_ensure_span_are_ok_on_wrong_gen.stderr |  4 ++--
 ...re_span_are_ok_on_wrong_gen_unnamed.stderr |  4 ++--
 .../ui/impl_incorrect_method_signature.stderr | 17 +++++++++++++--
 ...reference_in_impl_runtime_apis_call.stderr | 16 ++++++++++++--
 19 files changed, 80 insertions(+), 44 deletions(-)

diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr
index 1f08ab87c1f79..26c0717c0ad37 100644
--- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr
+++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr
@@ -10,7 +10,7 @@ error: `Pallet` does not have the std feature enabled, this will cause the `test
 49 | | }
    | |_^
    |
-   = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` (in Nightly builds, run with -Z macro-backtrace for more info)
+   = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info)
 
 error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet`
   --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1
diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr
index 161873866b6f3..99a543eef7a8a 100644
--- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr
+++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr
@@ -10,4 +10,4 @@ error[E0080]: evaluation of constant value failed
 83 | | }
    | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:74:1
    |
-   = note: this error originates in the macro `$crate::panic::panic_2021` (in Nightly builds, run with -Z macro-backtrace for more info)
+   = note: this
error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index c162a22bb87b0..6baf01e866f64 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,4 +13,4 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 920e627d43c31..8f2bf7be15749 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::event] defined, perhaps you should remov 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 1bc109a45ac57..aae3aaa80c865 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 9f646469d86a8..74af0c264cd5e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ 
b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -13,13 +13,13 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `create_inherent` not found for this + | -------------------- function or associated item `create_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -39,7 +39,7 @@ error[E0599]: no function or associated item named `is_inherent` found for struc --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `is_inherent` not found for this + | -------------------- function or associated item `is_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -59,7 +59,7 @@ error[E0599]: no function or associated item named `check_inherent` found for st --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `check_inherent` not found for this + | -------------------- function or associated item `check_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -79,7 +79,7 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ associated item `INHERENT_IDENTIFIER` not found for this + | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -99,7 +99,7 @@ error[E0599]: no function or associated item named `is_inherent_required` found --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `is_inherent_required` not found for this + | -------------------- function or associated item `is_inherent_required` not found for this struct ... 49 | / construct_runtime! 
{ 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index b93a2a92911ea..1a8fe64da1758 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 @@ -56,6 +56,10 @@ error[E0282]: type annotations needed ... | 57 | | } 58 | | } - | |_^ cannot infer type for type parameter `AccountId` declared on the enum `RawOrigin` + | |_^ cannot infer type of the type parameter `AccountId` declared on the enum `RawOrigin` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider specifying the generic argument + | +58 | }:: + | +++++++++++++ diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index a5e4fe3c1cd5a..6f0b13c58933e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,22 +13,27 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:56:3 | -49 | construct_runtime! { - | ------------------ variant or associated item `Pallet` not found here -... -56 | Pallet: pallet::{Pallet, ValidateUnsigned}, - | ^^^^^^ variant or associated item not found in `RuntimeCall` +49 | / construct_runtime! { +50 | | pub enum Runtime where +51 | | Block = Block, +52 | | NodeBlock = Block, +... 
| +56 | | Pallet: pallet::{Pallet, ValidateUnsigned}, + | | ^^^^^^ variant or associated item not found in `RuntimeCall` +57 | | } +58 | | } + | |_- variant or associated item `Pallet` not found for this enum error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `pre_dispatch` not found for this + | -------------------- function or associated item `pre_dispatch` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -49,7 +54,7 @@ error[E0599]: no function or associated item named `validate_unsigned` found for --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `validate_unsigned` not found for this + | -------------------- function or associated item `validate_unsigned` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 47ff1c8af8cd2..4212707599d41 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -10,7 +10,7 @@ error: `integrity_test` can only be passed once as input. 7 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:7:2 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index bfefadee99403..94bde853e4cc8 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -1,5 +1,5 @@ error: `on_initialize` can only be passed once as input. - --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { 2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { @@ -10,4 +10,4 @@ error: `on_initialize` can only be passed once as input. 
11 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index 7580cab2ea0b3..acc7f80b37663 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ error[E0277]: `::C` doesn't implement `std::fmt::Debug` - --> $DIR/debug.rs:7:2 + --> tests/derive_no_bound_ui/debug.rs:7:2 | 7 | c: T::C, | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::C` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `::C` to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 1d581ea7ed572..86e8d33c8dad1 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -6,7 +6,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index b1487776eac50..c6acccaaba7d4 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -6,7 +6,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 @@ -21,21 +21,23 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar: WrapperTypeEncode` is not satisfied - --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:1:1 | -1 | / #[frame_support::pallet] +1 | #[frame_support::pallet] + | ^----------------------- + | | + | _in this procedural macro expansion + | | 2 | | mod pallet { 3 | | use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; 4 | | use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; ... | 16 | | 17 | | #[pallet::call] - | |__________________- required by a bound introduced by this call -... 
-20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | |__________________^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | = note: required because of the requirements on the impl of `Encode` for `::Bar` + = note: this error originates in the derive macro `frame_support::codec::Encode` which comes from the expansion of the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:17:12 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index a0418760ba7e2..4a0b2ea67c7d6 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -7,7 +7,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | 17 | #[derive(Debug)] diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 92623e0329fe3..f95da9deef90a 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -18,4 +18,4 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index b8a9a1128d669..cced83f207c41 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the 
impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 5032f63bc1b1b..ab377e05d3901 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 2c47c2f480add..f5b6ac1da4576 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -18,5 +18,18 @@ note: type in trait error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 | -19 | fn test(data: String) {} - | ^^^^ expected `u64`, found struct `std::string::String` +17 | / sp_api::impl_runtime_apis! { +18 | | impl self::Api for Runtime { +19 | | fn test(data: String) {} + | | ^^^^ expected `u64`, found struct `std::string::String` +20 | | } +... | +32 | | } +33 | | } + | |_- arguments to this function are incorrect + | +note: associated function defined here + --> tests/ui/impl_incorrect_method_signature.rs:13:6 + | +13 | fn test(data: u64); + | ^^^^ diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 479e1cf05a9d1..6a99dcd3a1aed 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -18,9 +18,21 @@ note: type in trait error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 | -19 | fn test(data: &u64) { - | ^^^^^^^ expected `u64`, found `&u64` +17 | / sp_api::impl_runtime_apis! { +18 | | impl self::Api for Runtime { +19 | | fn test(data: &u64) { + | | ^^^^^^^ expected `u64`, found `&u64` +20 | | unimplemented!() +... 
 |
+34 | |     }
+35 | | }
+   | |_- arguments to this function are incorrect
+   |
+note: associated function defined here
+  --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:13:6
 |
+13 |     fn test(data: u64);
+   |        ^^^^
 help: consider removing the borrow
    |
 19 -     fn test(data: &u64) {

From 488fc24d98cfe643402b86990ae0aff27ba927b3 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Tue, 11 Oct 2022 11:49:12 +0300
Subject: [PATCH 59/75] rpc: Implement `transaction` RPC API (#12328)

* rpc/tx: Add transaction structures for serialization

Signed-off-by: Alexandru Vasile

* rpc/tx: Add public facing `TransactionEvent`

To circumvent the fact that serde does not allow mixing `#[serde(tag = "event")]`
with `#[serde(tag = "event", content = "block")]`, the public-facing subscription
structure is serialized and deserialized through an intermediate representation.

Signed-off-by: Alexandru Vasile

* rpc/tx: Add trait for the `transaction` API

Signed-off-by: Alexandru Vasile

* rpc/tx: Convert RPC errors to transaction events

Signed-off-by: Alexandru Vasile

* rpc/tx: Implement `transaction` RPC methods

Signed-off-by: Alexandru Vasile

* tx-pool: Propagate tx index to events

Signed-off-by: Alexandru Vasile

* tx-pool: Adjust testing to reflect tx index in events

Signed-off-by: Alexandru Vasile

* rpc/tx: Convert tx-pool events for the new RPC spec

Signed-off-by: Alexandru Vasile

* rpc/tx: Convert tx-pool `FinalityTimeout` event to `Dropped`

Signed-off-by: Alexandru Vasile

* service: Enable the `transaction` API

Signed-off-by: Alexandru Vasile

* rpc/tx: Add tests for tx event encoding and decoding

Signed-off-by: Alexandru Vasile

* tx: Add indentation for subscriptions

Signed-off-by: Alexandru Vasile

* rpc/tx: Fix documentation

Signed-off-by: Alexandru Vasile

* rpc/tx: Serialize usize to hex

Signed-off-by: Alexandru Vasile

* tx-pool: Rename closure parameters

Signed-off-by: Alexandru Vasile

* service: Separate RPC spec versions

Signed-off-by: Alexandru Vasile

* rpc/tx: Use `H256` for testing block's hash

Signed-off-by: Alexandru Vasile

* rpc/tx: Serialize numbers as string

Signed-off-by: Alexandru Vasile

* tx-pool: Backward compatibility with RPC v1

Signed-off-by: Alexandru Vasile

* Update client/rpc-spec-v2/src/transaction/transaction.rs

Co-authored-by: Niklas Adolfsson

* rpc/tx: Remove comment about serde clone

Signed-off-by: Alexandru Vasile

* rpc/tx: Use RPC custom error code for invalid tx format

Signed-off-by: Alexandru Vasile

* Update client/rpc-spec-v2/src/transaction/event.rs

Co-authored-by: James Wilson

* rpc/tx: Adjust internal structures for serialization/deserialization

Signed-off-by: Alexandru Vasile

Signed-off-by: Alexandru Vasile
Co-authored-by: Niklas Adolfsson
Co-authored-by: James Wilson
---
 Cargo.lock                                    |  11 +
 client/rpc-spec-v2/Cargo.toml                 |  10 +
 client/rpc-spec-v2/src/lib.rs                 |   4 +
 client/rpc-spec-v2/src/transaction/api.rs     |  37 ++
 client/rpc-spec-v2/src/transaction/error.rs   | 100 +++++
 client/rpc-spec-v2/src/transaction/event.rs   | 353 ++++++++++++++++++
 client/rpc-spec-v2/src/transaction/mod.rs     |  38 ++
 .../src/transaction/transaction.rs            | 208 +++++++++++
 client/service/Cargo.toml                     |   1 +
 client/service/src/builder.rs                 |  12 +
 client/transaction-pool/api/Cargo.toml        |   3 +
 client/transaction-pool/api/src/lib.rs        |  62 ++-
 client/transaction-pool/src/graph/listener.rs |  17 +-
 client/transaction-pool/src/graph/pool.rs     |   4 +-
 client/transaction-pool/src/graph/watcher.rs  |   8 +-
 client/transaction-pool/tests/pool.rs         |  41 +-
 16 files changed, 873 insertions(+), 36 deletions(-)
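The serde limitation described in the commit message can be reproduced with a small self-contained sketch (simplified, hypothetical enums standing in for the real event types; `serde` with the `derive` feature and `serde_json` assumed):

use serde::{Deserialize, Serialize};

// Variants that only need the `event` tag.
#[derive(Serialize, Deserialize)]
#[serde(tag = "event", rename_all = "camelCase")]
enum NonBlockIr {
	Validated,
}

// Variants that need the tag plus a `block` payload; serde refuses to mix
// this `content` form with the tag-only form inside a single enum.
#[derive(Serialize, Deserialize)]
#[serde(tag = "event", content = "block", rename_all = "camelCase")]
enum BlockIr {
	Finalized(String),
}

// The public enum converts into this union; `untagged` lets the two
// representations share one JSON surface.
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
enum EventIr {
	Block(BlockIr),
	NonBlock(NonBlockIr),
}

fn main() {
	let a = serde_json::to_string(&EventIr::NonBlock(NonBlockIr::Validated)).unwrap();
	assert_eq!(a, r#"{"event":"validated"}"#);

	let b = serde_json::to_string(&EventIr::Block(BlockIr::Finalized("0xff".into()))).unwrap();
	assert_eq!(b, r#"{"event":"finalized","block":"0xff"}"#);
}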
create mode 100644 client/rpc-spec-v2/src/transaction/api.rs create mode 100644 client/rpc-spec-v2/src/transaction/error.rs create mode 100644 client/rpc-spec-v2/src/transaction/event.rs create mode 100644 client/rpc-spec-v2/src/transaction/mod.rs create mode 100644 client/rpc-spec-v2/src/transaction/transaction.rs diff --git a/Cargo.lock b/Cargo.lock index 49b3dd3cf957b..04b90dfffba1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8792,10 +8792,19 @@ dependencies = [ name = "sc-rpc-spec-v2" version = "0.10.0-dev" dependencies = [ + "futures", "hex", "jsonrpsee", + "parity-scale-codec", "sc-chain-spec", + "sc-transaction-pool-api", + "serde", "serde_json", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-runtime", + "thiserror", "tokio", ] @@ -8848,6 +8857,7 @@ dependencies = [ "sc-offchain", "sc-rpc", "sc-rpc-server", + "sc-rpc-spec-v2", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -9068,6 +9078,7 @@ dependencies = [ "futures", "log", "serde", + "serde_json", "sp-blockchain", "sp-runtime", "thiserror", diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml index 12dec7464e6d0..885d415eb50d2 100644 --- a/client/rpc-spec-v2/Cargo.toml +++ b/client/rpc-spec-v2/Cargo.toml @@ -16,7 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } # Internal chain structures for "chain_spec". sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +# Pool for submitting extrinsics required by "transaction" +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "3.0.0" } +thiserror = "1.0" +serde = "1.0" hex = "0.4" +futures = "0.3.21" [dev-dependencies] serde_json = "1.0" diff --git a/client/rpc-spec-v2/src/lib.rs b/client/rpc-spec-v2/src/lib.rs index 297fda13172d6..f4b9d2f95bf97 100644 --- a/client/rpc-spec-v2/src/lib.rs +++ b/client/rpc-spec-v2/src/lib.rs @@ -24,3 +24,7 @@ #![deny(unused_crate_dependencies)] pub mod chain_spec; +pub mod transaction; + +/// Task executor that is being used by RPC subscriptions. +pub type SubscriptionTaskExecutor = std::sync::Arc; diff --git a/client/rpc-spec-v2/src/transaction/api.rs b/client/rpc-spec-v2/src/transaction/api.rs new file mode 100644 index 0000000000000..2f0c799f1cc19 --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/api.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API trait for transactions. 
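+//!
+//! Note: the `#[rpc(client, server)]` attribute below makes jsonrpsee expand this
+//! trait into generated `TransactionApiServer` and `TransactionApiClient` traits;
+//! the server variant is what `mod.rs` re-exports and the service registers.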
+ +use crate::transaction::event::TransactionEvent; +use jsonrpsee::proc_macros::rpc; +use sp_core::Bytes; + +#[rpc(client, server)] +pub trait TransactionApi { + /// Submit an extrinsic to watch. + /// + /// See [`TransactionEvent`](crate::transaction::event::TransactionEvent) for details on + /// transaction life cycle. + #[subscription( + name = "transaction_unstable_submitAndWatch" => "transaction_unstable_submitExtrinsic", + unsubscribe = "transaction_unstable_unwatch", + item = TransactionEvent, + )] + fn submit_and_watch(&self, bytes: Bytes); +} diff --git a/client/rpc-spec-v2/src/transaction/error.rs b/client/rpc-spec-v2/src/transaction/error.rs new file mode 100644 index 0000000000000..72a5959992f9e --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/error.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction RPC errors. +//! +//! Errors are interpreted as transaction events for subscriptions. + +use crate::transaction::event::{TransactionError, TransactionEvent}; +use sc_transaction_pool_api::error::Error as PoolError; +use sp_runtime::transaction_validity::InvalidTransaction; + +/// Transaction RPC errors. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Transaction pool error. + #[error("Transaction pool error: {}", .0)] + Pool(#[from] PoolError), + /// Verification error. 
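+	/// (Fallback for submission failures that are not typed pool errors; see the
+	/// `into_pool_error(..).unwrap_or_else(..)` call site in `transaction.rs`.)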
+ #[error("Extrinsic verification error: {}", .0)] + Verification(Box), +} + +impl From for TransactionEvent { + fn from(e: Error) -> Self { + match e { + Error::Verification(e) => TransactionEvent::Invalid(TransactionError { + error: format!("Verification error: {}", e), + }), + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => + TransactionEvent::Invalid(TransactionError { + error: format!("Invalid transaction with custom error: {}", e), + }), + Error::Pool(PoolError::InvalidTransaction(e)) => { + let msg: &str = e.into(); + TransactionEvent::Invalid(TransactionError { + error: format!("Invalid transaction: {}", msg), + }) + }, + Error::Pool(PoolError::UnknownTransaction(e)) => { + let msg: &str = e.into(); + TransactionEvent::Invalid(TransactionError { + error: format!("Unknown transaction validity: {}", msg), + }) + }, + Error::Pool(PoolError::TemporarilyBanned) => + TransactionEvent::Invalid(TransactionError { + error: "Transaction is temporarily banned".into(), + }), + Error::Pool(PoolError::AlreadyImported(_)) => + TransactionEvent::Invalid(TransactionError { + error: "Transaction is already imported".into(), + }), + Error::Pool(PoolError::TooLowPriority { old, new }) => + TransactionEvent::Invalid(TransactionError { + error: format!( + "The priority of the transactin is too low (pool {} > current {})", + old, new + ), + }), + Error::Pool(PoolError::CycleDetected) => TransactionEvent::Invalid(TransactionError { + error: "The transaction contains a cyclic dependency".into(), + }), + Error::Pool(PoolError::ImmediatelyDropped) => + TransactionEvent::Invalid(TransactionError { + error: "The transaction could not enter the pool because of the limit".into(), + }), + Error::Pool(PoolError::Unactionable) => TransactionEvent::Invalid(TransactionError { + error: "Transaction cannot be propagated and the local node does not author blocks" + .into(), + }), + Error::Pool(PoolError::NoTagsProvided) => TransactionEvent::Invalid(TransactionError { + error: "Transaction does not provide any tags, so the pool cannot identify it" + .into(), + }), + Error::Pool(PoolError::InvalidBlockId(_)) => + TransactionEvent::Invalid(TransactionError { + error: "The provided block ID is not valid".into(), + }), + Error::Pool(PoolError::RejectedFutureTransaction) => + TransactionEvent::Invalid(TransactionError { + error: "The pool is not accepting future transactions".into(), + }), + } + } +} diff --git a/client/rpc-spec-v2/src/transaction/event.rs b/client/rpc-spec-v2/src/transaction/event.rs new file mode 100644 index 0000000000000..3c75eaff10fd4 --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/event.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! The transaction's event returned as json compatible object. 
+ +use serde::{Deserialize, Serialize}; + +/// The transaction was broadcasted to a number of peers. +/// +/// # Note +/// +/// The RPC does not guarantee that the peers have received the +/// transaction. +/// +/// When the number of peers is zero, the event guarantees that +/// shutting down the local node will lead to the transaction +/// not being included in the chain. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionBroadcasted { + /// The number of peers the transaction was broadcasted to. + #[serde(with = "as_string")] + pub num_peers: usize, +} + +/// The transaction was included in a block of the chain. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionBlock { + /// The hash of the block the transaction was included into. + pub hash: Hash, + /// The index (zero-based) of the transaction within the body of the block. + #[serde(with = "as_string")] + pub index: usize, +} + +/// The transaction could not be processed due to an error. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionError { + /// Reason of the error. + pub error: String, +} + +/// The transaction was dropped because of exceeding limits. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionDropped { + /// True if the transaction was broadcasted to other peers and + /// may still be included in the block. + pub broadcasted: bool, + /// Reason of the event. + pub error: String, +} + +/// Possible transaction status events. +/// +/// The status events can be grouped based on their kinds as: +/// +/// 1. Runtime validated the transaction: +/// - `Validated` +/// +/// 2. Inside the `Ready` queue: +/// - `Broadcast` +/// +/// 3. Leaving the pool: +/// - `BestChainBlockIncluded` +/// - `Invalid` +/// +/// 4. Block finalized: +/// - `Finalized` +/// +/// 5. At any time: +/// - `Dropped` +/// - `Error` +/// +/// The subscription's stream is considered finished whenever the following events are +/// received: `Finalized`, `Error`, `Invalid` or `Dropped`. However, the user is allowed +/// to unsubscribe at any moment. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +// We need to manually specify the trait bounds for the `Hash` trait to ensure `into` and +// `from` still work. +#[serde(bound( + serialize = "Hash: Serialize + Clone", + deserialize = "Hash: Deserialize<'de> + Clone" +))] +#[serde(into = "TransactionEventIR", from = "TransactionEventIR")] +pub enum TransactionEvent { + /// The transaction was validated by the runtime. + Validated, + /// The transaction was broadcasted to a number of peers. + Broadcasted(TransactionBroadcasted), + /// The transaction was included in a best block of the chain. + /// + /// # Note + /// + /// This may contain `None` if the block is no longer a best + /// block of the chain. + BestChainBlockIncluded(Option>), + /// The transaction was included in a finalized block. + Finalized(TransactionBlock), + /// The transaction could not be processed due to an error. + Error(TransactionError), + /// The transaction is marked as invalid. + Invalid(TransactionError), + /// The client was not capable of keeping track of this transaction. + Dropped(TransactionDropped), +} + +/// Intermediate representation (IR) for the transaction events +/// that handles block events only. 
+/// +/// The block events require a JSON compatible interpretation similar to: +/// +/// ```json +/// { event: "EVENT", block: { hash: "0xFF", index: 0 } } +/// ``` +/// +/// This IR is introduced to circumvent that the block events need to +/// be serialized/deserialized with "tag" and "content", while other +/// events only require "tag". +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event", content = "block")] +enum TransactionEventBlockIR { + /// The transaction was included in the best block of the chain. + BestChainBlockIncluded(Option>), + /// The transaction was included in a finalized block of the chain. + Finalized(TransactionBlock), +} + +/// Intermediate representation (IR) for the transaction events +/// that handles non-block events only. +/// +/// The non-block events require a JSON compatible interpretation similar to: +/// +/// ```json +/// { event: "EVENT", num_peers: 0 } +/// ``` +/// +/// This IR is introduced to circumvent that the block events need to +/// be serialized/deserialized with "tag" and "content", while other +/// events only require "tag". +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +enum TransactionEventNonBlockIR { + Validated, + Broadcasted(TransactionBroadcasted), + Error(TransactionError), + Invalid(TransactionError), + Dropped(TransactionDropped), +} + +/// Intermediate representation (IR) used for serialization/deserialization of the +/// [`TransactionEvent`] in a JSON compatible format. +/// +/// Serde cannot mix `#[serde(tag = "event")]` with `#[serde(tag = "event", content = "block")]` +/// for specific enum variants. Therefore, this IR is introduced to circumvent this +/// restriction, while exposing a simplified [`TransactionEvent`] for users of the +/// rust ecosystem. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(bound(serialize = "Hash: Serialize", deserialize = "Hash: Deserialize<'de>"))]
+#[serde(rename_all = "camelCase")]
+#[serde(untagged)]
+enum TransactionEventIR<Hash> {
+	Block(TransactionEventBlockIR<Hash>),
+	NonBlock(TransactionEventNonBlockIR),
+}
+
+impl<Hash> From<TransactionEvent<Hash>> for TransactionEventIR<Hash> {
+	fn from(value: TransactionEvent<Hash>) -> Self {
+		match value {
+			TransactionEvent::Validated =>
+				TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated),
+			TransactionEvent::Broadcasted(event) =>
+				TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)),
+			TransactionEvent::BestChainBlockIncluded(event) =>
+				TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)),
+			TransactionEvent::Finalized(event) =>
+				TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)),
+			TransactionEvent::Error(event) =>
+				TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)),
+			TransactionEvent::Invalid(event) =>
+				TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)),
+			TransactionEvent::Dropped(event) =>
+				TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)),
+		}
+	}
+}
+
+impl<Hash> From<TransactionEventIR<Hash>> for TransactionEvent<Hash> {
+	fn from(value: TransactionEventIR<Hash>) -> Self {
+		match value {
+			TransactionEventIR::NonBlock(status) => match status {
+				TransactionEventNonBlockIR::Validated => TransactionEvent::Validated,
+				TransactionEventNonBlockIR::Broadcasted(event) =>
+					TransactionEvent::Broadcasted(event),
+				TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event),
+				TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event),
+				TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event),
+			},
+			TransactionEventIR::Block(block) => match block {
+				TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event),
+				TransactionEventBlockIR::BestChainBlockIncluded(event) =>
+					TransactionEvent::BestChainBlockIncluded(event),
+			},
+		}
+	}
+}
+
+/// Serialize and deserialize helper as string.
+mod as_string {
+	use super::*;
+	use serde::{Deserializer, Serializer};
+
+	pub fn serialize<S: Serializer>(data: &usize, serializer: S) -> Result<S::Ok, S::Error> {
+		data.to_string().serialize(serializer)
+	}
+
+	pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<usize, D::Error> {
+		String::deserialize(deserializer)?
+ .parse() + .map_err(|e| serde::de::Error::custom(format!("Parsing failed: {}", e))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::H256; + + #[test] + fn validated_event() { + let event: TransactionEvent<()> = TransactionEvent::Validated; + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"validated"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn broadcasted_event() { + let event: TransactionEvent<()> = + TransactionEvent::Broadcasted(TransactionBroadcasted { num_peers: 2 }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"broadcasted","numPeers":"2"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn best_chain_event() { + let event: TransactionEvent<()> = TransactionEvent::BestChainBlockIncluded(None); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"bestChainBlockIncluded","block":null}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + + let event: TransactionEvent = + TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash: H256::from_low_u64_be(1), + index: 2, + })); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"bestChainBlockIncluded","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"2"}}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn finalized_event() { + let event: TransactionEvent = TransactionEvent::Finalized(TransactionBlock { + hash: H256::from_low_u64_be(1), + index: 10, + }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"finalized","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"10"}}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn error_event() { + let event: TransactionEvent<()> = + TransactionEvent::Error(TransactionError { error: "abc".to_string() }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"error","error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn invalid_event() { + let event: TransactionEvent<()> = + TransactionEvent::Invalid(TransactionError { error: "abc".to_string() }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"invalid","error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn dropped_event() { + let event: TransactionEvent<()> = TransactionEvent::Dropped(TransactionDropped { + broadcasted: true, + error: "abc".to_string(), + }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"dropped","broadcasted":true,"error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } +} diff --git a/client/rpc-spec-v2/src/transaction/mod.rs 
b/client/rpc-spec-v2/src/transaction/mod.rs new file mode 100644 index 0000000000000..bb983894a428c --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/mod.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate transaction API. +//! +//! The transaction methods allow submitting a transaction and subscribing to +//! its status updates generated by the chain. +//! +//! # Note +//! +//! Methods are prefixed by `transaction`. + +pub mod api; +pub mod error; +pub mod event; +pub mod transaction; + +pub use api::TransactionApiServer; +pub use event::{ + TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, + TransactionEvent, +}; +pub use transaction::Transaction; diff --git a/client/rpc-spec-v2/src/transaction/transaction.rs b/client/rpc-spec-v2/src/transaction/transaction.rs new file mode 100644 index 0000000000000..e2cf736dff17a --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/transaction.rs @@ -0,0 +1,208 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API implementation for submitting transactions. + +use crate::{ + transaction::{ + api::TransactionApiServer, + error::Error, + event::{ + TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, + TransactionEvent, + }, + }, + SubscriptionTaskExecutor, +}; +use jsonrpsee::{ + core::async_trait, + types::{ + error::{CallError, ErrorObject}, + SubscriptionResult, + }, + SubscriptionSink, +}; +use sc_transaction_pool_api::{ + error::IntoPoolError, BlockHash, TransactionFor, TransactionPool, TransactionSource, + TransactionStatus, +}; +use std::sync::Arc; + +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::Bytes; +use sp_runtime::{generic, traits::Block as BlockT}; + +use codec::Decode; +use futures::{FutureExt, StreamExt, TryFutureExt}; + +/// An API for transaction RPC calls. +pub struct Transaction { + /// Substrate client. + client: Arc, + /// Transactions pool. + pool: Arc, + /// Executor to spawn subscriptions. 
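+ /// Each `submit_and_watch` subscription spawns one event-forwarding
+ /// task on this executor.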
+ executor: SubscriptionTaskExecutor, +} + +impl Transaction { + /// Creates a new [`Transaction`]. + pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { + Transaction { client, pool, executor } + } +} + +/// Currently we treat all RPC transactions as externals. +/// +/// Possibly in the future we could allow opt-in for special treatment +/// of such transactions, so that the block authors can inject +/// some unique transactions via RPC and have them included in the pool. +const TX_SOURCE: TransactionSource = TransactionSource::External; + +/// Extrinsic has an invalid format. +/// +/// # Note +/// +/// This is similar to the old `author` API error code. +const BAD_FORMAT: i32 = 1001; + +#[async_trait] +impl TransactionApiServer> for Transaction +where + Pool: TransactionPool + Sync + Send + 'static, + Pool::Hash: Unpin, + ::Hash: Unpin, + Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, +{ + fn submit_and_watch(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult { + // This is the only place where the RPC server can return an error for this + // subscription. Other defects must be signaled as events to the sink. + let decoded_extrinsic = match TransactionFor::::decode(&mut &xt[..]) { + Ok(decoded_extrinsic) => decoded_extrinsic, + Err(e) => { + let err = CallError::Custom(ErrorObject::owned( + BAD_FORMAT, + format!("Extrinsic has invalid format: {}", e), + None::<()>, + )); + let _ = sink.reject(err); + return Ok(()) + }, + }; + + let best_block_hash = self.client.info().best_hash; + + let submit = self + .pool + .submit_and_watch( + &generic::BlockId::hash(best_block_hash), + TX_SOURCE, + decoded_extrinsic, + ) + .map_err(|e| { + e.into_pool_error() + .map(Error::from) + .unwrap_or_else(|e| Error::Verification(Box::new(e))) + }); + + let fut = async move { + match submit.await { + Ok(stream) => { + let mut state = TransactionState::new(); + let stream = + stream.filter_map(|event| async move { state.handle_event(event) }); + sink.pipe_from_stream(stream.boxed()).await; + }, + Err(err) => { + // We have not created an `Watcher` for the tx. Make sure the + // error is still propagated as an event. + let event: TransactionEvent<::Hash> = err.into(); + sink.pipe_from_stream(futures::stream::once(async { event }).boxed()).await; + }, + }; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(()) + } +} + +/// The transaction's state that needs to be preserved between +/// multiple events generated by the transaction-pool. +/// +/// # Note +/// +/// In the future, the RPC server can submit only the last event when multiple +/// identical events happen in a row. +#[derive(Clone, Copy)] +struct TransactionState { + /// True if the transaction was previously broadcasted. + broadcasted: bool, +} + +impl TransactionState { + /// Construct a new [`TransactionState`]. + pub fn new() -> Self { + TransactionState { broadcasted: false } + } + + /// Handle events generated by the transaction-pool and convert them + /// to the new API expected state. + #[inline] + pub fn handle_event( + &mut self, + event: TransactionStatus, + ) -> Option> { + match event { + TransactionStatus::Ready | TransactionStatus::Future => + Some(TransactionEvent::::Validated), + TransactionStatus::Broadcast(peers) => { + // Set the broadcasted flag once if we submitted the transaction to + // at least one peer. 
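+ // The flag is sticky: once set it stays set, so a later `Dropped`
+ // event can still report that the network saw this transaction.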
+ self.broadcasted = self.broadcasted || !peers.is_empty(); + + Some(TransactionEvent::Broadcasted(TransactionBroadcasted { + num_peers: peers.len(), + })) + }, + TransactionStatus::InBlock((hash, index)) => + Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash, + index, + }))), + TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), + TransactionStatus::FinalityTimeout(_) => + Some(TransactionEvent::Dropped(TransactionDropped { + broadcasted: self.broadcasted, + error: "Maximum number of finality watchers has been reached".into(), + })), + TransactionStatus::Finalized((hash, index)) => + Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), + TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic was rendered invalid by another extrinsic".into(), + })), + TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic dropped from the pool due to exceeding limits".into(), + })), + TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic marked as invalid".into(), + })), + } + } +} diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 308da96fbbe77..a0c8f21effec1 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -69,6 +69,7 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../primitives/transaction-storage-proof" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } sc-rpc = { version = "4.0.0-dev", path = "../rpc" } +sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../rpc-spec-v2" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } sc-informant = { version = "0.10.0-dev", path = "../informant" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4301e17a8c31e..987198d4b7f48 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -57,6 +57,7 @@ use sc_rpc::{ system::SystemApiServer, DenyUnsafe, SubscriptionTaskExecutor, }; +use sc_rpc_spec_v2::transaction::TransactionApiServer; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -673,6 +674,13 @@ where (chain, state, child_state) }; + let transaction_v2 = sc_rpc_spec_v2::transaction::Transaction::new( + client.clone(), + transaction_pool.clone(), + task_executor.clone(), + ) + .into_rpc(); + let author = sc_rpc::author::Author::new( client.clone(), transaction_pool, @@ -690,6 +698,10 @@ where rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; } + // Part of the RPC v2 spec. + rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; + + // Part of the old RPC spec. 
rpc_api.merge(chain).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(author).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(system).map_err(|e| Error::Application(e.into()))?; diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index d34ffe512b023..1ab0f32bc8bad 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -15,3 +15,6 @@ serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } + +[dev-dependencies] +serde_json = "1.0" diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index 0ebb8f9d4cd9c..c0a94516ffc97 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -108,15 +108,18 @@ pub enum TransactionStatus { Ready, /// The transaction has been broadcast to the given peers. Broadcast(Vec), - /// Transaction has been included in block with given hash. - InBlock(BlockHash), + /// Transaction has been included in block with given hash + /// at the given position. + #[serde(with = "v1_compatible")] + InBlock((BlockHash, TxIndex)), /// The block this transaction was included in has been retracted. Retracted(BlockHash), /// Maximum number of finality watchers has been reached, /// old watchers are being removed. FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - Finalized(BlockHash), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA. + #[serde(with = "v1_compatible")] + Finalized((BlockHash, TxIndex)), /// Transaction has been replaced in the pool, by another transaction /// that provides the same tags. (e.g. same (sender, nonce)). Usurped(Hash), @@ -143,6 +146,8 @@ pub type TransactionFor
<P> = <<P as TransactionPool>::Block as BlockT>::Extrinsic; /// Type of transactions event stream for a pool. pub type TransactionStatusStreamFor<P> = TransactionStatusStream<TxHash<P>, BlockHash<P>>; /// Transaction type for a local pool. pub type LocalTransactionFor<P> = <<P as LocalTransactionPool>
::Block as BlockT>::Extrinsic; +/// Transaction's index within the block in which it was included. +pub type TxIndex = usize; /// Typical future type used in transaction pool api. pub type PoolFuture = std::pin::Pin> + Send>>; @@ -362,3 +367,52 @@ impl OffchainSubmitTransaction for TP }) } } + +/// Wrapper functions to keep the API backwards compatible over the wire for the old RPC spec. +mod v1_compatible { + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(data: &(H, usize), serializer: S) -> Result + where + S: Serializer, + H: Serialize, + { + let (hash, _) = data; + serde::Serialize::serialize(&hash, serializer) + } + + pub fn deserialize<'de, D, H>(deserializer: D) -> Result<(H, usize), D::Error> + where + D: Deserializer<'de>, + H: Deserialize<'de>, + { + let hash: H = serde::Deserialize::deserialize(deserializer)?; + Ok((hash, 0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn tx_status_compatibility() { + let event: TransactionStatus = TransactionStatus::InBlock((1, 2)); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"inBlock":1}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, TransactionStatus::InBlock((1, 0))); + + let event: TransactionStatus = TransactionStatus::Finalized((1, 2)); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"finalized":1}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, TransactionStatus::Finalized((1, 0))); + } +} diff --git a/client/transaction-pool/src/graph/listener.rs b/client/transaction-pool/src/graph/listener.rs index d4f42b32fdbb8..776749abf2d5d 100644 --- a/client/transaction-pool/src/graph/listener.rs +++ b/client/transaction-pool/src/graph/listener.rs @@ -104,13 +104,18 @@ impl Listener { /// Transaction was pruned from the pool. pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { debug!(target: "txpool", "[{:?}] Pruned at {:?}", tx, block_hash); - self.fire(tx, |s| s.in_block(block_hash)); - self.finality_watchers.entry(block_hash).or_insert(vec![]).push(tx.clone()); + // Get the transactions included in the given block hash. + let txs = self.finality_watchers.entry(block_hash).or_insert(vec![]); + txs.push(tx.clone()); + // Current transaction is the last one included. 
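+ // (the transaction was just pushed, so its index is the new length
+ // of the watcher list minus one)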
+ let tx_index = txs.len() - 1; + + self.fire(tx, |watcher| watcher.in_block(block_hash, tx_index)); while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { if let Some((hash, txs)) = self.finality_watchers.pop_front() { for tx in txs { - self.fire(&tx, |s| s.finality_timeout(hash)); + self.fire(&tx, |watcher| watcher.finality_timeout(hash)); } } } @@ -120,7 +125,7 @@ impl Listener { pub fn retracted(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { for hash in hashes { - self.fire(&hash, |s| s.retracted(block_hash)) + self.fire(&hash, |watcher| watcher.retracted(block_hash)) } } } @@ -128,9 +133,9 @@ impl Listener { /// Notify all watchers that transactions have been finalized pub fn finalized(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { - for hash in hashes { + for (tx_index, hash) in hashes.into_iter().enumerate() { log::debug!(target: "txpool", "[{:?}] Sent finalization event (block {:?})", hash, block_hash); - self.fire(&hash, |s| s.finalized(block_hash)) + self.fire(&hash, |watcher| watcher.finalized(block_hash, tx_index)) } } } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 19acbddbe7843..108ae791e37b3 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -770,7 +770,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), + Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), ); } @@ -803,7 +803,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), + Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), ); } diff --git a/client/transaction-pool/src/graph/watcher.rs b/client/transaction-pool/src/graph/watcher.rs index 8cd78cfc78240..0613300c8684b 100644 --- a/client/transaction-pool/src/graph/watcher.rs +++ b/client/transaction-pool/src/graph/watcher.rs @@ -84,13 +84,13 @@ impl Sender { } /// Extrinsic has been included in block with given hash. - pub fn in_block(&mut self, hash: BH) { - self.send(TransactionStatus::InBlock(hash)); + pub fn in_block(&mut self, hash: BH, index: usize) { + self.send(TransactionStatus::InBlock((hash, index))); } /// Extrinsic has been finalized by a finality gadget. 
- pub fn finalized(&mut self, hash: BH) { - self.send(TransactionStatus::Finalized(hash)); + pub fn finalized(&mut self, hash: BH, index: usize) { + self.send(TransactionStatus::Finalized((hash, index))); self.is_finalized = true; } diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index f04a27cf81e1d..be75523c1230f 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -328,7 +328,7 @@ fn should_revalidate_across_many_blocks() { block_on( watcher1 - .take_while(|s| future::ready(*s != TransactionStatus::InBlock(block_hash))) + .take_while(|s| future::ready(*s != TransactionStatus::InBlock((block_hash, 0)))) .collect::>(), ); @@ -398,24 +398,24 @@ fn should_push_watchers_during_maintenance() { futures::executor::block_on_stream(watcher0).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 0)), + TransactionStatus::Finalized((header_hash, 0)) ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 1)), + TransactionStatus::Finalized((header_hash, 1)) ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 2)), + TransactionStatus::Finalized((header_hash, 2)) ], ); } @@ -450,8 +450,8 @@ fn finalization() { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -573,30 +573,31 @@ fn fork_aware_finalization() { for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((h, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((h, 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c2, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), 
Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d2, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); + // In block e1 we submitted: [dave, bob] xts in this order. + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 1)))); assert_eq!(stream.next(), None); } } @@ -646,10 +647,10 @@ fn prune_and_retract_tx_at_same_time() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2, 0)))); assert_eq!(stream.next(), None); } } From 60be7b8d03b8dc4a15080bddcefb1a9b20598d6a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 11 Oct 2022 15:56:18 +0200 Subject: [PATCH 60/75] Remove the unused light client requests (#12470) * Remove the unused light client requests * Add comment about new ids --- .../src/light_client_requests/handler.rs | 4 -- .../network/light/src/schema/light.v1.proto | 51 +------------------ 2 files changed, 2 insertions(+), 53 deletions(-) diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 5efdc3ff6a18b..9dc02eb9ff291 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -151,12 +151,8 @@ where self.on_remote_call_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => self.on_remote_read_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteHeaderRequest(_r)) => - return Err(HandleRequestError::BadRequest("Not supported.")), Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteChangesRequest(_r)) => - return Err(HandleRequestError::BadRequest("Not supported.")), None => return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; diff --git a/client/network/light/src/schema/light.v1.proto b/client/network/light/src/schema/light.v1.proto index 9b5d47719dc28..1df5466e21988 100644 --- a/client/network/light/src/schema/light.v1.proto +++ b/client/network/light/src/schema/light.v1.proto @@ -17,9 +17,8 @@ message Request { oneof request { RemoteCallRequest remote_call_request = 1; RemoteReadRequest remote_read_request = 2; - RemoteHeaderRequest remote_header_request = 3; RemoteReadChildRequest remote_read_child_request = 4; - RemoteChangesRequest remote_changes_request = 5; + // Note: ids 3 and 5 were used in the past. It would be preferable to not re-use them. 
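+ // Protobuf identifies fields on the wire by number alone, so reusing
+ // 3 or 5 could make old peers misinterpret new messages.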
} } @@ -28,8 +27,7 @@ message Response { oneof response { RemoteCallResponse remote_call_response = 1; RemoteReadResponse remote_read_response = 2; - RemoteHeaderResponse remote_header_response = 3; - RemoteChangesResponse remote_changes_response = 4; + // Note: ids 3 and 4 were used in the past. It would be preferable to not re-use them. } } @@ -73,48 +71,3 @@ message RemoteReadChildRequest { // Storage keys. repeated bytes keys = 6; } - -// Remote header request. -message RemoteHeaderRequest { - // Block number to request header for. - bytes block = 2; -} - -// Remote header response. -message RemoteHeaderResponse { - // Header. None if proof generation has failed (e.g. header is unknown). - bytes header = 2; // optional - // Header proof. - bytes proof = 3; -} - -/// Remote changes request. -message RemoteChangesRequest { - // Hash of the first block of the range (including first) where changes are requested. - bytes first = 2; - // Hash of the last block of the range (including last) where changes are requested. - bytes last = 3; - // Hash of the first block for which the requester has the changes trie root. All other - // affected roots must be proved. - bytes min = 4; - // Hash of the last block that we can use when querying changes. - bytes max = 5; - // Storage child node key which changes are requested. - bytes storage_key = 6; // optional - // Storage key which changes are requested. - bytes key = 7; -} - -// Remote changes response. -message RemoteChangesResponse { - // Proof has been generated using block with this number as a max block. Should be - // less than or equal to the RemoteChangesRequest::max block number. - bytes max = 2; - // Changes proof. - repeated bytes proof = 3; - // Changes tries roots missing on the requester' node. - repeated Pair roots = 4; - // Missing changes tries roots proof. - bytes roots_proof = 5; -} - From 6072b90096102767076b24da840d1e1ccf1baabc Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Tue, 11 Oct 2022 18:25:12 +0300 Subject: [PATCH 61/75] Fix flaky service test (#12472) Sometimes `NotificationStreamOpenened` would be received for the other protocol before `SyncConnected` was received so when the connection was closed, an incorrect event was read from the event stream. --- client/network/common/src/config.rs | 4 ++++ client/network/src/service/tests.rs | 26 ++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index e4a7f04c8d6e8..96c7c11ec2696 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -244,6 +244,10 @@ pub struct NonDefaultSetConfig { /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` pub fallback_names: Vec, /// Handshake of the protocol + /// + /// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more + /// details. This field is temporarily used to allow moving the hardcoded block announcement + /// protocol out of `protocol.rs`. pub handshake: Option, /// Maximum allowed size of single notifications. 
pub max_notification_size: u64, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 7c651c675b83e..b656d7a7c0ddc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -637,19 +637,21 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() { ..config::NetworkConfiguration::new_local() }); - loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {}, - }; + async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { + let mut notif_received = false; + let mut sync_received = false; + + while !notif_received || !sync_received { + match stream.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => notif_received = true, + Event::SyncConnected { .. } => sync_received = true, + _ => {}, + }; + } } - loop { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {}, - }; - } + wait_for_events(&mut events_stream1).await; + wait_for_events(&mut events_stream2).await; // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); @@ -659,7 +661,7 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() { )); let _ = events_stream2.next().await; // ignore the reopen event - // now disconnect using the block announcement protocol, verify that `SyncDisconnected` is + // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is // emitted node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); From 1bf2e6db6ccc0f9d872cddf9e3254227d100f2bf Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 12 Oct 2022 00:35:54 +0800 Subject: [PATCH 62/75] Rename `from_components` to `from_parts` (#12473) * Rename `from_components` to `from_parts` * Fixes * Spelling --- frame/contracts/src/gas.rs | 2 +- frame/election-provider-multi-phase/src/mock.rs | 2 +- frame/system/src/limits.rs | 2 +- primitives/weights/src/weight_v2.rs | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index d0076652dd6d4..c0cc2db2aa3eb 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -109,7 +109,7 @@ where pub fn nested(&mut self, amount: Weight) -> Result { // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - let amount = Weight::from_components( + let amount = Weight::from_parts( if amount.ref_time().is_zero() { self.gas_left().ref_time() } else { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 2615d863c91e0..6a638c8d2a2e1 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -239,7 +239,7 @@ parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( - Weight::from_components(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), NORMAL_DISPATCH_RATIO, ); } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index 07ad240afe159..eb95b699eba32 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -208,7 +208,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { Self::with_sensible_defaults( - Weight::from_components(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), DEFAULT_NORMAL_RATIO, ) } diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index 8596a782c1fa7..2933d80099dd7 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -112,8 +112,8 @@ impl Weight { Self { ref_time: 0, proof_size } } - /// Construct [`Weight`] with weight components, namely reference time and storage size weights. - pub const fn from_components(ref_time: u64, proof_size: u64) -> Self { + /// Construct [`Weight`] from weight parts, namely reference time and proof size weights. + pub const fn from_parts(ref_time: u64, proof_size: u64) -> Self { Self { ref_time, proof_size } } @@ -455,8 +455,8 @@ mod tests { #[test] fn is_zero_works() { assert!(Weight::zero().is_zero()); - assert!(!Weight::from_components(1, 0).is_zero()); - assert!(!Weight::from_components(0, 1).is_zero()); + assert!(!Weight::from_parts(1, 0).is_zero()); + assert!(!Weight::from_parts(0, 1).is_zero()); assert!(!Weight::MAX.is_zero()); } } From ccc8f6cc3debf7276cbdff78c7a906be6221ca35 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 11 Oct 2022 14:41:43 -0400 Subject: [PATCH 63/75] Avoid Unstable Sort (#12455) * dont use unstable sort * remove comment * add clippy rule --- .cargo/config.toml | 5 +++-- bin/node/bench/src/core.rs | 2 +- client/rpc-servers/src/lib.rs | 2 +- frame/benchmarking/src/analysis.rs | 4 ++-- frame/contracts/src/wasm/runtime.rs | 6 +----- frame/election-provider-multi-phase/src/mock.rs | 2 +- frame/examples/basic/src/benchmarking.rs | 2 +- primitives/npos-elections/fuzzer/src/common.rs | 6 +++--- utils/frame/benchmarking-cli/src/shared/stats.rs | 2 +- 9 files changed, 14 insertions(+), 17 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 5355758f7a4fa..66b28b3485d86 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -18,14 +18,15 @@ rustflags = [ "-Aclippy::borrowed-box", # Reasonable to fix this one "-Aclippy::too-many-arguments", # (Turning this on would lead to) "-Aclippy::unnecessary_cast", # Types may change - "-Aclippy::identity-op", # One case where we do 0 + + "-Aclippy::identity-op", # One case where we do 0 + "-Aclippy::useless_conversion", # Types may change "-Aclippy::unit_arg", # styalistic. "-Aclippy::option-map-unit-fn", # styalistic - "-Aclippy::bind_instead_of_map", # styalistic + "-Aclippy::bind_instead_of_map", # styalistic "-Aclippy::erasing_op", # E.g. 0 * DOLLARS "-Aclippy::eq_op", # In tests we test equality. 
"-Aclippy::while_immutable_condition", # false positives "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives + "-Aclippy::stable_sort_primitive", # prefer stable sort ] diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 3b3060a888349..b6ad3ecd80068 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -132,7 +132,7 @@ pub fn run_benchmark(benchmark: Box, mode: Mode) -> Be durations.push(duration.as_nanos()); } - durations.sort_unstable(); + durations.sort(); let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 0e2aca0dcc829..7eb825e169bfa 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -198,7 +198,7 @@ fn format_allowed_hosts(addrs: &[SocketAddr]) -> Vec { fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModule { let mut available_methods = rpc_api.method_names().collect::>(); - available_methods.sort_unstable(); + available_methods.sort(); rpc_api .register_method("rpc_methods", move |_, _| { diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 9ba2ea657bab0..a736cdc203182 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -181,7 +181,7 @@ impl Analysis { }) .collect(); - values.sort_unstable(); + values.sort(); let mid = values.len() / 2; Some(Self { @@ -311,7 +311,7 @@ impl Analysis { } for (_, rs) in results.iter_mut() { - rs.sort_unstable(); + rs.sort(); let ql = rs.len() / 4; *rs = rs[ql..rs.len() - ql].to_vec(); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 3296492994071..6d7e6bcf69e5f 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1976,11 +1976,7 @@ pub mod env { data_len: u32, ) -> Result<(), TrapReason> { fn has_duplicates(items: &mut Vec) -> bool { - // # Warning - // - // Unstable sorts are non-deterministic across architectures. The usage here is OK - // because we are rejecting duplicates which removes the non determinism. - items.sort_unstable(); + items.sort(); // Find any two consecutive equal elements. items.windows(2).any(|w| match &w { &[a, b] => a == b, diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 6a638c8d2a2e1..e04e0bf474caf 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -155,7 +155,7 @@ pub fn trim_helpers() -> TrimHelpers { seq_phragmen(desired_targets as usize, targets.clone(), voters.clone(), None).unwrap(); // sort by decreasing order of stake - assignments.sort_unstable_by_key(|assignment| { + assignments.sort_by_key(|assignment| { std::cmp::Reverse(stakes.get(&assignment.who).cloned().unwrap_or_default()) }); diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 4d1659af46460..87d65a0bfa5b6 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -63,7 +63,7 @@ benchmarks! 
{ } }: { // The benchmark execution phase could also be a closure with custom code - m.sort_unstable(); + m.sort(); } // This line generates test cases for benchmarking, and could be run by: diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index e5853f28c4929..ad9bd43f9bce0 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -80,7 +80,7 @@ pub fn generate_random_npos_inputs( } candidates.push(id); } - candidates.sort_unstable(); + candidates.sort(); candidates.dedup(); assert_eq!(candidates.len(), candidate_count); @@ -99,11 +99,11 @@ pub fn generate_random_npos_inputs( let mut chosen_candidates = Vec::with_capacity(n_candidates_chosen); chosen_candidates.extend(candidates.choose_multiple(&mut rng, n_candidates_chosen)); - chosen_candidates.sort_unstable(); + chosen_candidates.sort(); voters.push((id, vote_weight, chosen_candidates)); } - voters.sort_unstable(); + voters.sort(); voters.dedup_by_key(|(id, _weight, _chosen_candidates)| *id); assert_eq!(voters.len(), voter_count); diff --git a/utils/frame/benchmarking-cli/src/shared/stats.rs b/utils/frame/benchmarking-cli/src/shared/stats.rs index 3234d5f2f94f7..ffae4a17724f8 100644 --- a/utils/frame/benchmarking-cli/src/shared/stats.rs +++ b/utils/frame/benchmarking-cli/src/shared/stats.rs @@ -112,7 +112,7 @@ impl Stats { /// Returns the specified percentile for the given data. /// This is best effort since it ignores the interpolation case. fn percentile(mut xs: Vec, p: f64) -> u64 { - xs.sort_unstable(); + xs.sort(); let index = (xs.len() as f64 * p).ceil() as usize - 1; xs[index.clamp(0, xs.len() - 1)] } From 94b731c4f7059662232fa7b5bdc5a64b17d69252 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 11 Oct 2022 22:20:13 +0200 Subject: [PATCH 64/75] Finalized block event triggers tx maintanance (#12305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * finalized block event triggers tx maintanance * tx-pool: enactment helper introduced * tx-pool: ChainApi: added tree_route method * enactment logic implemented + tests Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> * Some additional tests * minor improvements * trigger CI job * fix compilation errors ChainApi::tree_route return type changed to Result>, as some implementations (tests) are not required to provide this tree route. 
* formatting * trait removed * implementation slightly simplified (thanks to @koute) * get rid of Arc<> in EnactmentState return value * minor improvement * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Apply suggestions from code review * comment updated + formatting * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Davide Galassi * formatting * finalization notification bug fix + new test case + log::warn message when finalized block is being retracted by new event * added error message on tree_route failure * Apply suggestions from code review Co-authored-by: Bastian Köcher * use provided tree_route in Finalized event * Option removed from ChainApi::tree_route * doc added, test and logs improved * handle_enactment aligned with original implementation * use async-await * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Bastian Köcher * formatting + warn->debug * compilation error fix * enactment_state initializers added * enactment_state: Option removed * manual-seal: compilation & tests fix * manual-seal: tests fixed * tests cleanup * another compilation error fixed * TreeRoute::new added * get rid of pub hack * one more test added * formatting * TreeRoute::new doc added + formatting * Apply suggestions from code review Co-authored-by: Davide Galassi * (bool,Option) simplified to Option * log message improved * yet another review suggestions applied * get rid of hash in handle_enactment * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/transaction-pool/src/lib.rs Co-authored-by: Bastian Köcher * minor corrections * EnactmentState moved to new file * File header corrected * error formatting aligned with codebase * Apply suggestions from code review Co-authored-by: Bastian Köcher * remove commented code * small nits Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Davide Galassi Co-authored-by: Bastian Köcher Co-authored-by: André Silva --- Cargo.lock | 2 + client/consensus/manual-seal/src/lib.rs | 28 +- client/transaction-pool/Cargo.toml | 1 + client/transaction-pool/api/Cargo.toml | 1 + client/transaction-pool/api/src/lib.rs | 4 +- client/transaction-pool/benches/basics.rs | 8 + client/transaction-pool/src/api.rs | 30 +- .../transaction-pool/src/enactment_state.rs | 579 ++++++++++++++++ client/transaction-pool/src/graph/pool.rs | 8 + client/transaction-pool/src/lib.rs | 346 +++++----- client/transaction-pool/src/tests.rs | 9 + client/transaction-pool/tests/pool.rs | 624 +++++++++++++++++- primitives/blockchain/src/header_metadata.rs | 7 + .../runtime/transaction-pool/src/lib.rs | 10 +- 14 files changed, 1466 insertions(+), 191 deletions(-) create mode 100644 client/transaction-pool/src/enactment_state.rs diff --git a/Cargo.lock b/Cargo.lock index 04b90dfffba1e..d29023f330ce1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9044,6 +9044,7 @@ version = "4.0.0-dev" dependencies = [ "array-bytes", "assert_matches", + "async-trait", "criterion", "futures", "futures-timer", @@ -9075,6 +9076,7 @@ dependencies = [ name = "sc-transaction-pool-api" version = "4.0.0-dev" dependencies = [ + "async-trait", "futures", "log", "serde", diff --git a/client/consensus/manual-seal/src/lib.rs 
b/client/consensus/manual-seal/src/lib.rs index 4672e7275a56b..09ab139b91c73 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -305,9 +305,8 @@ pub async fn run_instant_seal_and_finalize( mod tests { use super::*; use sc_basic_authorship::ProposerFactory; - use sc_client_api::BlockBackend; use sc_consensus::ImportedAux; - use sc_transaction_pool::{BasicPool, Options, RevalidationType}; + use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; use sp_inherents::InherentData; use sp_runtime::generic::{BlockId, Digest, DigestItem}; @@ -359,6 +358,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -367,6 +367,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as transactions are imported into the @@ -429,6 +431,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -437,6 +440,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -505,8 +510,13 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let pool_api = api(); + let pool_api = Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + )); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -515,6 +525,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -550,7 +562,6 @@ mod tests { .await .unwrap(); let created_block = rx.await.unwrap().unwrap(); - pool_api.increment_nonce(Alice.into()); // assert that the background task returns ok assert_eq!( @@ -566,8 +577,7 @@ mod tests { } } ); - let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block; - pool_api.add_block(block, true); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); @@ -588,9 +598,6 @@ mod tests { .await .is_ok()); assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); - let block = 
client.block(&BlockId::Number(2)).unwrap().unwrap().block; - pool_api.add_block(block, true); - pool_api.increment_nonce(Alice.into()); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); @@ -614,6 +621,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -622,6 +630,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 5e005f5523ae8..0bdfb623e6c14 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -13,6 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.2" diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index 1ab0f32bc8bad..366d0eb99b945 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -9,6 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "Transaction pool client facing API." [dependencies] +async-trait = "0.1.57" futures = "0.3.21" log = "0.4.17" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index c0a94516ffc97..c1e49ad07d7b1 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -21,6 +21,7 @@ pub mod error; +use async_trait::async_trait; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_runtime::{ @@ -303,9 +304,10 @@ pub enum ChainEvent { } /// Trait for transaction pool maintenance. 
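+// With `async fn` and `#[async_trait]`, the boxed future is generated by the
+// macro, so implementors no longer hand-roll a `Pin<Box<dyn Future>>`.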
+#[async_trait] pub trait MaintainedTransactionPool: TransactionPool { /// Perform maintenance - fn maintain(&self, event: ChainEvent) -> Pin + Send>>; + async fn maintain(&self, event: ChainEvent); } /// Transaction pool interface for submitting local transactions that exposes a diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index a7991269439ce..2632f8fb6aab5 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -121,6 +121,14 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } + + fn tree_route( + &self, + _from: ::Hash, + _to: ::Hash, + ) -> Result, Self::Error> { + unimplemented!() + } } fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 4710c96b003cd..110647b8cb3b0 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -30,6 +30,7 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{blockchain::HeaderBackend, BlockBackend}; use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{HeaderMetadata, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, @@ -111,8 +112,11 @@ impl FullChainApi { impl graph::ChainApi for FullChainApi where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -190,6 +194,14 @@ where ) -> Result::Header>, Self::Error> { self.client.header(*at).map_err(Into::into) } + + fn tree_route( + &self, + from: ::Hash, + to: ::Hash, + ) -> Result, Self::Error> { + sp_blockchain::tree_route::(&*self.client, from, to).map_err(Into::into) + } } /// Helper function to validate a transaction using a full chain API. @@ -202,8 +214,11 @@ fn validate_transaction_blocking( ) -> error::Result where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -264,8 +279,11 @@ where impl FullChainApi where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs new file mode 100644 index 0000000000000..242b557dfbbbd --- /dev/null +++ b/client/transaction-pool/src/enactment_state.rs @@ -0,0 +1,579 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate transaction pool implementation. + +use sc_transaction_pool_api::ChainEvent; +use sp_blockchain::TreeRoute; +use sp_runtime::traits::Block as BlockT; + +/// Helper struct for keeping track of the current state of processed new best +/// block and finalized events. The main purpose of keeping track of this state +/// is to figure out if a transaction pool enactment is needed or not. +/// +/// Given the following chain: +/// +/// B1-C1-D1-E1 +/// / +/// A +/// \ +/// B2-C2-D2-E2 +/// +/// Some scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) and `Finalized` +/// (`f`) events: +/// +/// - `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1))` +/// - `f(C1)`, `nbb(C1)` -> false (enactment was already performed in `f(C1))` +/// - `f(C1)`, `nbb(D2)` -> false (enactment was already performed in `f(C1)`, +/// we should not retract finalized block) +/// - `f(C1)`, `f(C2)`, `nbb(C1)` -> false +/// - `nbb(C1)`, `nbb(C2)` -> true (switching fork is OK) +/// - `nbb(B1)`, `nbb(B2)` -> true +/// - `nbb(B1)`, `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(B1)`) +/// - `nbb(C1)`, `f(B1)` -> false (enactment was already performed in `nbb(B2)`) +pub struct EnactmentState +where + Block: BlockT, +{ + recent_best_block: Block::Hash, + recent_finalized_block: Block::Hash, +} + +impl EnactmentState +where + Block: BlockT, +{ + /// Returns a new `EnactmentState` initialized with the given parameters. + pub fn new(recent_best_block: Block::Hash, recent_finalized_block: Block::Hash) -> Self { + EnactmentState { recent_best_block, recent_finalized_block } + } + + /// Returns the recently finalized block. + pub fn recent_finalized_block(&self) -> Block::Hash { + self.recent_finalized_block + } + + /// Updates the state according to the given `ChainEvent`, returning + /// `Some(tree_route)` with a tree route including the blocks that need to + /// be enacted/retracted. If no enactment is needed then `None` is returned. + pub fn update( + &mut self, + event: &ChainEvent, + tree_route: &F, + ) -> Result>, String> + where + F: Fn(Block::Hash, Block::Hash) -> Result, String>, + { + let (new_hash, finalized) = match event { + ChainEvent::NewBestBlock { hash, .. } => (*hash, false), + ChainEvent::Finalized { hash, .. } => (*hash, true), + }; + + // block was already finalized + if self.recent_finalized_block == new_hash { + log::debug!(target: "txpool", "handle_enactment: block already finalized"); + return Ok(None) + } + + // compute actual tree route from best_block to notified block, and use + // it instead of tree_route provided with event + let tree_route = tree_route(self.recent_best_block, new_hash)?; + + log::debug!( + target: "txpool", + "resolve hash:{:?} finalized:{:?} tree_route:{:?} best_block:{:?} finalized_block:{:?}", + new_hash, finalized, tree_route, self.recent_best_block, self.recent_finalized_block + ); + + // check if recently finalized block is on retracted path. this could be + // happening if we first received a finalization event and then a new + // best event for some old stale best head. 
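+ // A tree route from such a stale best head would retract the finalized
+ // block, which must never happen, so the event is skipped.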
+		if tree_route.retracted().iter().any(|x| x.hash == self.recent_finalized_block) {
+			log::debug!(
+				target: "txpool",
+				"Recently finalized block {} would be retracted by ChainEvent {}, skipping",
+				self.recent_finalized_block, new_hash
+			);
+			return Ok(None)
+		}
+
+		if finalized {
+			self.recent_finalized_block = new_hash;
+
+			// if there are no enacted blocks in best_block -> hash tree_route,
+			// it means that block being finalized was already enacted (this
+			// case also covers best_block == new_hash), recent_best_block
+			// remains valid.
+			if tree_route.enacted().is_empty() {
+				log::trace!(
+					target: "txpool",
+					"handle_enactment: no newly enacted blocks since recent best block"
+				);
+				return Ok(None)
+			}
+
+			// otherwise enacted finalized block becomes best block...
+		}
+
+		self.recent_best_block = new_hash;
+
+		Ok(Some(tree_route))
+	}
+}
+
+#[cfg(test)]
+mod enactment_state_tests {
+	use super::EnactmentState;
+	use sc_transaction_pool_api::ChainEvent;
+	use sp_blockchain::{HashAndNumber, TreeRoute};
+	use std::sync::Arc;
+	use substrate_test_runtime_client::runtime::{Block, Hash};
+
+	// some helpers for convenient blocks' hash naming
+	fn a() -> HashAndNumber<Block> {
+		HashAndNumber { number: 1, hash: Hash::from([0xAA; 32]) }
+	}
+	fn b1() -> HashAndNumber<Block> {
+		HashAndNumber { number: 2, hash: Hash::from([0xB1; 32]) }
+	}
+	fn c1() -> HashAndNumber<Block> {
+		HashAndNumber { number: 3, hash: Hash::from([0xC1; 32]) }
+	}
+	fn d1() -> HashAndNumber<Block> {
+		HashAndNumber { number: 4, hash: Hash::from([0xD1; 32]) }
+	}
+	fn e1() -> HashAndNumber<Block> {
+		HashAndNumber { number: 5, hash: Hash::from([0xE1; 32]) }
+	}
+	fn b2() -> HashAndNumber<Block> {
+		HashAndNumber { number: 2, hash: Hash::from([0xB2; 32]) }
+	}
+	fn c2() -> HashAndNumber<Block> {
+		HashAndNumber { number: 3, hash: Hash::from([0xC2; 32]) }
+	}
+	fn d2() -> HashAndNumber<Block> {
+		HashAndNumber { number: 4, hash: Hash::from([0xD2; 32]) }
+	}
+	fn e2() -> HashAndNumber<Block> {
+		HashAndNumber { number: 5, hash: Hash::from([0xE2; 32]) }
+	}
+
+	/// mock tree_route computing function for simple two-forks chain
+	fn tree_route(from: Hash, to: Hash) -> Result<TreeRoute<Block>, String> {
+		let chain = vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()];
+		let pivot = 4_usize;
+
+		let from = chain
+			.iter()
+			.position(|bn| bn.hash == from)
+			.ok_or("existing block should be given")?;
+		let to = chain
+			.iter()
+			.position(|bn| bn.hash == to)
+			.ok_or("existing block should be given")?;
+
+		//      B1-C1-D1-E1
+		//     /
+		//    A
+		//     \
+		//      B2-C2-D2-E2
+		//
+		// [E1 D1 C1 B1 A B2 C2 D2 E2]
+
+		let vec: Vec<HashAndNumber<Block>> = if from < to {
+			chain.into_iter().skip(from).take(to - from + 1).collect()
+		} else {
+			chain.into_iter().skip(to).take(from - to + 1).rev().collect()
+		};
+
+		let pivot = if from <= pivot && to <= pivot {
+			if from < to {
+				to - from
+			} else {
+				0
+			}
+		} else if from >= pivot && to >= pivot {
+			if from < to {
+				0
+			} else {
+				from - to
+			}
+		} else {
+			if from < to {
+				pivot - from
+			} else {
+				from - pivot
+			}
+		};
+
+		Ok(TreeRoute::new(vec, pivot))
+	}
+
+	mod mock_tree_route_tests {
+		use super::*;
+
+		/// asserts that tree routes are equal
+		fn assert_treeroute_eq(expected: TreeRoute<Block>, result: TreeRoute<Block>) {
+			assert_eq!(result.common_block().hash, expected.common_block().hash);
+			assert_eq!(result.enacted().len(), expected.enacted().len());
+			assert_eq!(result.retracted().len(), expected.retracted().len());
+			assert!(result
+				.enacted()
+				.iter()
+				.zip(expected.enacted().iter())
+				.all(|(a, b)| a.hash == b.hash));
+			assert!(result
+				.retracted()
+				.iter()
+				.zip(expected.retracted().iter())
+				.all(|(a, b)| a.hash == b.hash));
+		}
+
+		// some tests for mock tree_route function
+		#[test]
+		fn tree_route_mock_test_01() {
+			let result = tree_route(b1().hash, a().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b1(), a()], 1);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_02() {
+			let result = tree_route(a().hash, b1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![a(), b1()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_03() {
+			let result = tree_route(a().hash, c2().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![a(), b2(), c2()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_04() {
+			let result = tree_route(e2().hash, a().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_05() {
+			let result = tree_route(d1().hash, b1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![d1(), c1(), b1()], 2);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_06() {
+			let result = tree_route(d2().hash, b2().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_07() {
+			let result = tree_route(b1().hash, d1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_08() {
+			let result = tree_route(b2().hash, d2().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_09() {
+			let result = tree_route(e2().hash, e1().hash).expect("tree route exists");
+			let expected =
+				TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_10() {
+			let result = tree_route(e1().hash, e2().hash).expect("tree route exists");
+			let expected =
+				TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_11() {
+			let result = tree_route(b1().hash, c2().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_12() {
+			let result = tree_route(d2().hash, b1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_13() {
+			let result = tree_route(c2().hash, e1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_14() {
+			let result = tree_route(b1().hash, b1().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b1()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_15() {
+			let result = tree_route(b2().hash, b2().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![b2()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+
+		#[test]
+		fn tree_route_mock_test_16() {
+			let result = tree_route(a().hash, a().hash).expect("tree route exists");
+			let expected = TreeRoute::new(vec![a()], 0);
+			assert_treeroute_eq(result, expected);
+		}
+	}
+
+	fn trigger_new_best_block(
+		state: &mut EnactmentState<Block>,
+		from: HashAndNumber<Block>,
+		acted_on: HashAndNumber<Block>,
+	) -> bool {
+		let (from, acted_on) = (from.hash, acted_on.hash);
+
+		let event_tree_route = tree_route(from, acted_on).expect("Tree route exists");
+
+		state
+			.update(
+				&ChainEvent::NewBestBlock {
+					hash: acted_on,
+					tree_route: Some(Arc::new(event_tree_route)),
+				},
+				&tree_route,
+			)
+			.unwrap()
+			.is_some()
+	}
+
+	fn trigger_finalized(
+		state: &mut EnactmentState<Block>,
+		from: HashAndNumber<Block>,
+		acted_on: HashAndNumber<Block>,
+	) -> bool {
+		let (from, acted_on) = (from.hash, acted_on.hash);
+
+		let v = tree_route(from, acted_on)
+			.expect("Tree route exists")
+			.enacted()
+			.iter()
+			.map(|h| h.hash)
+			.collect::<Vec<_>>();
+
+		state
+			.update(&ChainEvent::Finalized { hash: acted_on, tree_route: v.into() }, &tree_route)
+			.unwrap()
+			.is_some()
+	}
+
+	fn assert_es_eq(
+		es: &EnactmentState<Block>,
+		expected_best_block: HashAndNumber<Block>,
+		expected_finalized_block: HashAndNumber<Block>,
+	) {
+		assert_eq!(es.recent_best_block, expected_best_block.hash);
+		assert_eq!(es.recent_finalized_block, expected_finalized_block.hash);
+	}
+
+	#[test]
+	fn test_enactment_helper() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		//   B1-C1-D1-E1
+		//  /
+		// A
+		//  \
+		//   B2-C2-D2-E2
+
+		let result = trigger_new_best_block(&mut es, a(), d1());
+		assert!(result);
+		assert_es_eq(&es, d1(), a());
+
+		let result = trigger_new_best_block(&mut es, d1(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), a());
+
+		let result = trigger_finalized(&mut es, a(), d2());
+		assert!(result);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_new_best_block(&mut es, d2(), e1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_finalized(&mut es, a(), b2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_finalized(&mut es, a(), b1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_new_best_block(&mut es, a(), d2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_finalized(&mut es, a(), d2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_new_best_block(&mut es, a(), c2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_new_best_block(&mut es, a(), c1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, d2(), d2());
+
+		let result = trigger_new_best_block(&mut es, d2(), e2());
+		assert!(result);
+		assert_es_eq(&es, e2(), d2());
+
+		let result = trigger_finalized(&mut es, d2(), e2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e2(), e2());
+	}
+
+	#[test]
+	fn test_enactment_helper_2() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		// A-B1-C1-D1-E1
+
+		let result = trigger_new_best_block(&mut es, a(), b1());
+		assert!(result);
+		assert_es_eq(&es, b1(), a());
+
+		let result = trigger_new_best_block(&mut es, b1(), c1());
+		assert!(result);
+		assert_es_eq(&es, c1(), a());
+
+		let result = trigger_new_best_block(&mut es, c1(), d1());
+		assert!(result);
+		assert_es_eq(&es, d1(), a());
+
+		let result = trigger_new_best_block(&mut es, d1(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), a());
+
+		let result = trigger_finalized(&mut es, a(), c1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), c1());
+
+		let result = trigger_finalized(&mut es, c1(), e1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), e1());
+	}
+
+	#[test]
+	fn test_enactment_helper_3() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		// A-B1-C1-D1-E1
+
+		let result = trigger_new_best_block(&mut es, a(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), a());
+
+		let result = trigger_finalized(&mut es, a(), b1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), b1());
+	}
+
+	#[test]
+	fn test_enactment_helper_4() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		// A-B1-C1-D1-E1
+
+		let result = trigger_finalized(&mut es, a(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), e1());
+
+		let result = trigger_finalized(&mut es, e1(), b1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), e1());
+	}
+
+	#[test]
+	fn test_enactment_helper_5() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		//   B1-C1-D1-E1
+		//  /
+		// A
+		//  \
+		//   B2-C2-D2-E2
+
+		let result = trigger_finalized(&mut es, a(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), e1());
+
+		let result = trigger_finalized(&mut es, e1(), e2());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), e1());
+	}
+
+	#[test]
+	fn test_enactment_helper_6() {
+		sp_tracing::try_init_simple();
+		let mut es = EnactmentState::new(a().hash, a().hash);
+
+		// A-B1-C1-D1-E1
+
+		let result = trigger_new_best_block(&mut es, a(), b1());
+		assert!(result);
+		assert_es_eq(&es, b1(), a());
+
+		let result = trigger_finalized(&mut es, a(), d1());
+		assert!(result);
+		assert_es_eq(&es, d1(), d1());
+
+		let result = trigger_new_best_block(&mut es, a(), e1());
+		assert!(result);
+		assert_es_eq(&es, e1(), d1());
+
+		let result = trigger_new_best_block(&mut es, a(), c1());
+		assert_eq!(result, false);
+		assert_es_eq(&es, e1(), d1());
+	}
+}
diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs
index 108ae791e37b3..9afceffe8dddf 100644
--- a/client/transaction-pool/src/graph/pool.rs
+++ b/client/transaction-pool/src/graph/pool.rs
@@ -20,6 +20,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration};
 use futures::{channel::mpsc::Receiver, Future};
 use sc_transaction_pool_api::error;
+use sp_blockchain::TreeRoute;
 use sp_runtime::{
 	generic::BlockId,
 	traits::{self, Block as BlockT, SaturatedConversion},
@@ -97,6 +98,13 @@ pub trait ChainApi: Send + Sync {
 		&self,
 		at: &BlockId<Self::Block>,
 	) -> Result<Option<<Self::Block as BlockT>::Header>, Self::Error>;
+
+	/// Compute a tree-route between two blocks. See [`TreeRoute`] for more details.
+	fn tree_route(
+		&self,
+		from: <Self::Block as BlockT>::Hash,
+		to: <Self::Block as BlockT>::Hash,
+	) -> Result<TreeRoute<Self::Block>, Self::Error>;
 }
 
 /// Pool configuration options.
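For implementers of `ChainApi`, the new `tree_route` method is typically a thin delegation to `sp_blockchain::tree_route`, as the in-tree `TestApi` implementation later in this series does. A minimal sketch of a client-backed implementation follows; the `client` field, the `Client` type parameter, and the `Into` error conversion are assumptions here, not part of this patch:

    fn tree_route(
        &self,
        from: <Self::Block as BlockT>::Hash,
        to: <Self::Block as BlockT>::Hash,
    ) -> Result<TreeRoute<Self::Block>, Self::Error> {
        // `sp_blockchain::tree_route` only needs `HeaderMetadata<Block>` to walk
        // parent links from both blocks down to their lowest common ancestor.
        sp_blockchain::tree_route::<Block, Client>(&*self.client, from, to).map_err(Into::into)
    }

This is why the `MaintainedTransactionPool` bounds below gain `sp_blockchain::HeaderMetadata`: the pool now computes its own tree routes instead of trusting the ones attached to chain events.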
diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 7b9ce9d6047c0..410ab662e3601 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -23,6 +23,7 @@ #![warn(unused_extern_crates)] mod api; +mod enactment_state; pub mod error; mod graph; mod metrics; @@ -31,6 +32,8 @@ mod revalidation; mod tests; pub use crate::api::FullChainApi; +use async_trait::async_trait; +use enactment_state::EnactmentState; use futures::{ channel::oneshot, future::{self, ready}, @@ -62,6 +65,8 @@ use std::time::Instant; use crate::metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; +use sp_blockchain::{HashAndNumber, TreeRoute}; + type BoxedReadyIterator = Box>> + Send>; @@ -85,6 +90,7 @@ where revalidation_queue: Arc>, ready_poll: Arc, Block>>>, metrics: PrometheusMetrics, + enactment_state: Arc>>, } struct ReadyPoll { @@ -163,7 +169,11 @@ where PoolApi: graph::ChainApi + 'static, { /// Create new basic transaction pool with provided api, for tests. - pub fn new_test(pool_api: Arc) -> (Self, Pin + Send>>) { + pub fn new_test( + pool_api: Arc, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ) -> (Self, Pin + Send>>) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); @@ -175,6 +185,10 @@ where revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), ready_poll: Default::default(), metrics: Default::default(), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), }, background_task, ) @@ -190,6 +204,8 @@ where revalidation_type: RevalidationType, spawner: impl SpawnEssentialNamed, best_block_number: NumberFor, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { @@ -217,6 +233,10 @@ where })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), metrics: PrometheusMetrics::new(prometheus), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), } } @@ -358,6 +378,7 @@ where + sp_runtime::traits::BlockIdTo + sc_client_api::ExecutorProvider + sc_client_api::UsageProvider + + sp_blockchain::HeaderMetadata + Send + Sync + 'static, @@ -380,6 +401,8 @@ where RevalidationType::Full, spawner, client.usage_info().chain.best_number, + client.usage_info().chain.best_hash, + client.usage_info().chain.finalized_hash, )); // make transaction pool available for off-chain runtime calls. 
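The extra `best_block_hash`/`finalized_hash` constructor parameters exist so that the pool's `EnactmentState` starts out consistent with the chain it is attached to. A sketch of how a caller might wire this up, assuming a `client` implementing `sc_client_api::UsageProvider` (the leading arguments mirror the full `with_revalidation_type` signature, which is only partially visible in the hunk above):

    // Seed the pool with the chain's current view, as `BasicPool::new_full`
    // does via `client.usage_info()`.
    let chain = client.usage_info().chain;
    let pool = BasicPool::with_revalidation_type(
        options,
        is_validator,
        pool_api,
        prometheus,
        RevalidationType::Full,
        spawner,
        chain.best_number,
        chain.best_hash,      // seeds EnactmentState::recent_best_block
        chain.finalized_hash, // seeds EnactmentState::recent_finalized_block
    );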
@@ -396,7 +419,8 @@ where Client: sp_api::ProvideRuntimeApi + sc_client_api::BlockBackend + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo, + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata, Client: Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, { @@ -563,166 +587,190 @@ async fn prune_known_txs_for_block MaintainedTransactionPool for BasicPool +impl BasicPool where Block: BlockT, PoolApi: 'static + graph::ChainApi, { - fn maintain(&self, event: ChainEvent) -> Pin + Send>> { - match event { - ChainEvent::NewBestBlock { hash, tree_route } => { - let pool = self.pool.clone(); - let api = self.api.clone(); - - let id = BlockId::hash(hash); - let block_number = match api.block_id_to_number(&id) { - Ok(Some(number)) => number, - _ => { - log::trace!( + /// Handles enactment and retraction of blocks, prunes stale transactions + /// (that have already been enacted) and resubmits transactions that were + /// retracted. + async fn handle_enactment(&self, tree_route: TreeRoute) { + log::trace!(target: "txpool", "handle_enactment tree_route: {tree_route:?}"); + let pool = self.pool.clone(); + let api = self.api.clone(); + + let (hash, block_number) = match tree_route.last() { + Some(HashAndNumber { hash, number }) => (hash, number), + None => { + log::warn!( + target: "txpool", + "Skipping ChainEvent - no last block in tree route {:?}", + tree_route, + ); + return + }, + }; + + let next_action = self.revalidation_strategy.lock().next( + *block_number, + Some(std::time::Duration::from_secs(60)), + Some(20u32.into()), + ); + + // We keep track of everything we prune so that later we won't add + // transactions with those hashes from the retracted blocks. + let mut pruned_log = HashSet::>::new(); + + // If there is a tree route, we use this to prune known tx based on the enacted + // blocks. Before pruning enacted transactions, we inform the listeners about + // retracted blocks and their transactions. This order is important, because + // if we enact and retract the same transaction at the same time, we want to + // send first the retract and than the prune event. 
+ for retracted in tree_route.retracted() { + // notify txs awaiting finality that it has been retracted + pool.validated_pool().on_block_retracted(retracted.hash); + } + + future::join_all( + tree_route + .enacted() + .iter() + .map(|h| prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool)), + ) + .await + .into_iter() + .for_each(|enacted_log| { + pruned_log.extend(enacted_log); + }); + + self.metrics + .report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)); + + if next_action.resubmit { + let mut resubmit_transactions = Vec::new(); + + for retracted in tree_route.retracted() { + let hash = retracted.hash; + + let block_transactions = api + .block_body(&BlockId::hash(hash)) + .await + .unwrap_or_else(|e| { + log::warn!("Failed to fetch block body: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .filter(|tx| tx.is_signed().unwrap_or(true)); + + let mut resubmitted_to_report = 0; + + resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { + let tx_hash = pool.hash_of(tx); + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::debug!( target: "txpool", - "Skipping chain event - no number for that block {:?}", - id, + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, ); - return Box::pin(ready(())) - }, - }; - - let next_action = self.revalidation_strategy.lock().next( - block_number, - Some(std::time::Duration::from_secs(60)), - Some(20u32.into()), - ); - let revalidation_strategy = self.revalidation_strategy.clone(); - let revalidation_queue = self.revalidation_queue.clone(); - let ready_poll = self.ready_poll.clone(); - let metrics = self.metrics.clone(); - - async move { - // We keep track of everything we prune so that later we won't add - // transactions with those hashes from the retracted blocks. - let mut pruned_log = HashSet::>::new(); - - // If there is a tree route, we use this to prune known tx based on the enacted - // blocks. Before pruning enacted transactions, we inform the listeners about - // retracted blocks and their transactions. This order is important, because - // if we enact and retract the same transaction at the same time, we want to - // send first the retract and than the prune event. 
- if let Some(ref tree_route) = tree_route { - for retracted in tree_route.retracted() { - // notify txs awaiting finality that it has been retracted - pool.validated_pool().on_block_retracted(retracted.hash); - } - - future::join_all(tree_route.enacted().iter().map(|h| { - prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool) - })) - .await - .into_iter() - .for_each(|enacted_log| { - pruned_log.extend(enacted_log); - }) } + !contains + })); - pruned_log.extend(prune_known_txs_for_block(id, &*api, &*pool).await); - - metrics.report(|metrics| { - metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) - }); - - if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) { - let mut resubmit_transactions = Vec::new(); - - for retracted in tree_route.retracted() { - let hash = retracted.hash; - - let block_transactions = api - .block_body(&BlockId::hash(hash)) - .await - .unwrap_or_else(|e| { - log::warn!("Failed to fetch block body: {}", e); - None - }) - .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - - let mut resubmitted_to_report = 0; - - resubmit_transactions.extend(block_transactions.into_iter().filter( - |tx| { - let tx_hash = pool.hash_of(tx); - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - log::debug!( - target: "txpool", - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, - ); - } - !contains - }, - )); - - metrics.report(|metrics| { - metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - }); - } - - if let Err(e) = pool - .resubmit_at( - &id, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await - { - log::debug!( - target: "txpool", - "[{:?}] Error re-submitting transactions: {}", - id, - e, - ) - } - } + self.metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); + } - let extra_pool = pool.clone(); - // After #5200 lands, this arguably might be moved to the - // handler of "all blocks notification". - ready_poll.lock().trigger(block_number, move || { - Box::new(extra_pool.validated_pool().ready()) - }); + if let Err(e) = pool + .resubmit_at( + &BlockId::Hash(*hash), + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await + { + log::debug!( + target: "txpool", + "[{:?}] Error re-submitting transactions: {}", + hash, + e, + ) + } + } - if next_action.revalidate { - let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); - revalidation_queue.revalidate_later(block_number, hashes).await; + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the + // handler of "all blocks notification". 
+ self.ready_poll + .lock() + .trigger(*block_number, move || Box::new(extra_pool.validated_pool().ready())); - revalidation_strategy.lock().clear(); - } - } - .boxed() + if next_action.revalidate { + let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); + self.revalidation_queue.revalidate_later(*block_number, hashes).await; + + self.revalidation_strategy.lock().clear(); + } + } +} + +#[async_trait] +impl MaintainedTransactionPool for BasicPool +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, +{ + async fn maintain(&self, event: ChainEvent) { + let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); + let compute_tree_route = |from, to| -> Result, String> { + match self.api.tree_route(from, to) { + Ok(tree_route) => Ok(tree_route), + Err(e) => + return Err(format!( + "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" + )), + } + }; + + let result = self.enactment_state.lock().update(&event, &compute_tree_route); + + match result { + Err(msg) => { + log::warn!(target: "txpool", "{msg}"); + return }, - ChainEvent::Finalized { hash, tree_route } => { - let pool = self.pool.clone(); - async move { - for hash in tree_route.iter().chain(&[hash]) { - if let Err(e) = pool.validated_pool().on_block_finalized(*hash).await { - log::warn!( - target: "txpool", - "Error [{}] occurred while attempting to notify watchers of finalization {}", - e, hash - ) - } - } - } - .boxed() + Ok(None) => {}, + Ok(Some(tree_route)) => { + self.handle_enactment(tree_route).await; }, + }; + + if let ChainEvent::Finalized { hash, tree_route } = event { + log::trace!( + target: "txpool", + "on-finalized enacted: {tree_route:?}, previously finalized: \ + {prev_finalized_block:?}", + ); + + for hash in tree_route.iter().chain(std::iter::once(&hash)) { + if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await { + log::warn!( + target: "txpool", + "Error occurred while attempting to notify watchers about finalization {}: {}", + hash, e + ) + } + } } } } diff --git a/client/transaction-pool/src/tests.rs b/client/transaction-pool/src/tests.rs index 79142e16a1b36..ce2c7872e32bb 100644 --- a/client/transaction-pool/src/tests.rs +++ b/client/transaction-pool/src/tests.rs @@ -22,6 +22,7 @@ use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}; use codec::Encode; use parking_lot::Mutex; use sc_transaction_pool_api::error; +use sp_blockchain::TreeRoute; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Hash}, @@ -173,6 +174,14 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } + + fn tree_route( + &self, + _from: ::Hash, + _to: ::Hash, + ) -> Result, Self::Error> { + unimplemented!() + } } pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index be75523c1230f..5590051768e9a 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -30,13 +30,14 @@ use sc_transaction_pool::*; use sc_transaction_pool_api::{ ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; +use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::Block as _, transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; -use std::{collections::BTreeSet, sync::Arc}; +use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, 
Hash, Header, Index, Transfer}, AccountKeyring::*, @@ -50,13 +51,32 @@ fn pool() -> Pool { fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { let api = Arc::new(TestApi::with_alice_nonce(209)); - let (pool, background_task) = BasicPool::new_test(api.clone()); + let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(background_task); (pool, api, thread_pool) } +fn create_basic_pool_with_genesis( + test_api: Arc, +) -> (BasicPool, Pin + Send>>) { + let genesis_hash = { + test_api + .chain() + .read() + .block_by_number + .get(&0) + .map(|blocks| blocks[0].0.header.hash()) + .expect("there is block 0. qed") + }; + BasicPool::new_test(test_api, genesis_hash, genesis_hash) +} + +fn create_basic_pool(test_api: TestApi) -> BasicPool { + create_basic_pool_with_genesis(Arc::from(test_api)).0 +} + const SOURCE: TransactionSource = TransactionSource::External; #[test] @@ -436,7 +456,7 @@ fn finalization() { let xt = uxt(Alice, 209); let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); pool.api().push_block(2, vec![xt.clone()], true); @@ -459,9 +479,9 @@ fn finalization() { fn fork_aware_finalization() { let api = TestApi::empty(); // starting block A1 (last finalized.) - api.push_block(1, vec![], true); + let a_header = api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); @@ -476,10 +496,13 @@ fn fork_aware_finalization() { let from_dave_watcher; let from_bob_watcher; let b1; + let c1; let d1; let c2; let d2; + block_on(pool.maintain(block_event(a_header))); + // block B1 { let watcher = @@ -489,6 +512,7 @@ fn fork_aware_finalization() { canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; b1 = header.hash(); block_on(pool.maintain(event)); @@ -504,6 +528,7 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> C2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; c2 = header.hash(); block_on(pool.maintain(event)); @@ -518,6 +543,7 @@ fn fork_aware_finalization() { assert_eq!(pool.status().ready, 1); let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); + log::trace!(target:"txpool", ">> D2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d2 = header.hash(); block_on(pool.maintain(event)); @@ -530,8 +556,9 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block(3, vec![from_charlie.clone()], true); - + let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true); + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1 = header.hash(); canon_watchers.push((watcher, header.hash())); let event = block_event_with_retracted(header.clone(), d2, pool.api()); block_on(pool.maintain(event)); @@ -547,11 +574,12 @@ fn fork_aware_finalization() { let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api().push_block(4, vec![xt.clone()], true); + let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true); + log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); + d1 = header.hash(); canon_watchers.push((w, header.hash())); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; - d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); let event = ChainEvent::Finalized { hash: d1, tree_route: Arc::from(vec![]) }; @@ -560,9 +588,10 @@ fn fork_aware_finalization() { let e1; - // block e1 + // block E1 { - let header = pool.api().push_block(5, vec![from_dave, from_bob], true); + let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true); + log::trace!(target:"txpool", ">> E1: {:?} {:?}", header.hash(), header); e1 = header.hash(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); @@ -610,7 +639,7 @@ fn prune_and_retract_tx_at_same_time() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let from_alice = uxt(Alice, 1); pool.api().increment_nonce(Alice.into()); @@ -676,7 +705,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -721,7 +750,7 @@ fn resubmit_from_retracted_fork() { // starting block A1 (last finalized.) 
api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -866,13 +895,14 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { #[test] fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); - + let best_hash = client.info().best_hash; + let finalized_hash = client.info().finalized_hash; let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new( - client, - None, - &sp_core::testing::TaskExecutor::new(), - ))) + BasicPool::new_test( + Arc::new(FullChainApi::new(client, None, &sp_core::testing::TaskExecutor::new())), + best_hash, + finalized_hash, + ) .0, ); @@ -908,12 +938,19 @@ fn should_not_accept_old_signatures() { fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); + let best_hash = client.info().best_hash; + let finalized_hash = client.info().finalized_hash; + let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new( - client.clone(), - None, - &sp_core::testing::TaskExecutor::new(), - ))) + BasicPool::new_test( + Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + )), + best_hash, + finalized_hash, + ) .0, ); @@ -998,3 +1035,540 @@ fn stale_transactions_are_pruned() { assert_eq!(pool.status().future, 0); assert_eq!(pool.status().ready, 0); } + +#[test] +fn finalized_only_handled_correctly() { + sp_tracing::try_init_simple(); + let xt = uxt(Alice, 209); + + let (pool, api, _guard) = maintained_pool(); + + let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) + .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + let header = api.push_block(1, vec![xt], false); + + let event = + ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + + assert_eq!(pool.status().ready, 0); + + { + let mut stream = futures::executor::block_on_stream(watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn best_block_after_finalized_handled_correctly() { + sp_tracing::try_init_simple(); + let xt = uxt(Alice, 209); + + let (pool, api, _guard) = maintained_pool(); + + let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) + .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + let header = api.push_block(1, vec![xt], true); + + let event = + ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + block_on(pool.maintain(block_event(header.clone()))); + + assert_eq!(pool.status().ready, 0); + + { + let mut stream = futures::executor::block_on_stream(watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn switching_fork_with_finalized_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) 
+ let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let b2_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block B2 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = pool.api().push_block_with_parent( + a_header.hash(), + vec![from_alice.clone(), from_bob.clone()], + true, + ); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); + b2_header = header; + } + + { + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn switching_fork_multiple_times_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let b2_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block B2 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported"); + let header = pool.api().push_block_with_parent( + a_header.hash(), + vec![from_alice.clone(), from_bob.clone()], + true, + ); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); + b2_header = header; + } + + { + // phase-0 + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-1 + let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api()); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + // phase-2 + let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api()); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-3 + let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn two_blocks_delayed_finalization_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + let from_charlie = uxt(Charlie, 3); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + pool.api().increment_nonce(Charlie.into()); + + let from_alice_watcher; + let from_bob_watcher; + let from_charlie_watcher; + let b1_header; + let c1_header; + let d1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1_header = header; + } + + // block D1 + { + from_charlie_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); + assert_eq!(pool.status().ready, 3); + + log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); + d1_header = header; + } + + { + let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 3); + } + + { + let event = ChainEvent::NewBestBlock { hash: d1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + let event = ChainEvent::Finalized { + hash: c1_header.hash(), + tree_route: Arc::from(vec![b1_header.hash()]), + }; + block_on(pool.maintain(event)); + } + + // this is to collect events from_charlie_watcher and make sure nothing was retracted + { + let event = ChainEvent::Finalized { hash: d1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_charlie_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(d1_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn delayed_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let c1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1_header = header; + } + + { + // phase-0 + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-1 + let event = ChainEvent::NewBestBlock { hash: c1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + // phase-2 + let event = ChainEvent::Finalized { hash: b1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + // phase-3 + let event = ChainEvent::Finalized { hash: c1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn best_block_after_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let c1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported");
+		let header =
+			pool.api()
+				.push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true);
+		assert_eq!(pool.status().ready, 2);
+
+		log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header);
+		c1_header = header;
+	}
+
+	{
+		let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) };
+		block_on(pool.maintain(event));
+	}
+
+	{
+		let event = ChainEvent::Finalized {
+			hash: c1_header.hash(),
+			tree_route: Arc::from(vec![a_header.hash(), b1_header.hash()]),
+		};
+		block_on(pool.maintain(event));
+		assert_eq!(pool.status().ready, 0);
+	}
+
+	{
+		let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None };
+		block_on(pool.maintain(event));
+	}
+
+	{
+		let mut stream = futures::executor::block_on_stream(from_alice_watcher);
+		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
+		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash())));
+		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash())));
+		assert_eq!(stream.next(), None);
+	}
+
+	{
+		let mut stream = futures::executor::block_on_stream(from_bob_watcher);
+		assert_eq!(stream.next(), Some(TransactionStatus::Ready));
+		assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash())));
+		assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash())));
+		assert_eq!(stream.next(), None);
+	}
+}
diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs
index 46477a75b8a1f..0ced264d5c786 100644
--- a/primitives/blockchain/src/header_metadata.rs
+++ b/primitives/blockchain/src/header_metadata.rs
@@ -176,6 +176,13 @@ pub struct TreeRoute<Block: BlockT> {
 }
 
 impl<Block: BlockT> TreeRoute<Block> {
+	/// Creates a new `TreeRoute`.
+	///
+	/// It is required that `pivot < route.len()`, otherwise it may panic.
+	pub fn new(route: Vec<HashAndNumber<Block>>, pivot: usize) -> Self {
+		TreeRoute { route, pivot }
+	}
+
 	/// Get a slice of all retracted blocks in reverse order (towards common ancestor).
pub fn retracted(&self) -> &[HashAndNumber] { &self.route[..self.pivot] diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 4008427623499..dc8be78aa7397 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -22,7 +22,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; -use sp_blockchain::CachedHeaderMetadata; +use sp_blockchain::{CachedHeaderMetadata, TreeRoute}; use sp_runtime::{ generic::{self, BlockId}, traits::{ @@ -335,6 +335,14 @@ impl sc_transaction_pool::ChainApi for TestApi { self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), }) } + + fn tree_route( + &self, + from: ::Hash, + to: ::Hash, + ) -> Result, Self::Error> { + sp_blockchain::tree_route::(self, from, to).map_err(Into::into) + } } impl sp_blockchain::HeaderMetadata for TestApi { From 88db102213b06be4a25e17d195589c57cd4ed435 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 12 Oct 2022 10:06:22 +0200 Subject: [PATCH 65/75] tx-pool: failing tests fixed (#12481) --- client/transaction-pool/tests/pool.rs | 62 +++++++++++++-------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 5590051768e9a..27891432753a4 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -1058,8 +1058,8 @@ fn finalized_only_handled_correctly() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1087,8 +1087,8 @@ fn best_block_after_finalized_handled_correctly() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1155,18 +1155,18 @@ fn switching_fork_with_finalized_works() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); 
assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); assert_eq!(stream.next(), None); } } @@ -1250,17 +1250,17 @@ fn switching_fork_multiple_times_works() { let mut stream = futures::executor::block_on_stream(from_alice_watcher); //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-1 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); //phase-2 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-3 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -1268,13 +1268,13 @@ fn switching_fork_multiple_times_works() { let mut stream = futures::executor::block_on_stream(from_bob_watcher); //phase-1 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); //phase-2 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); assert_eq!(stream.next(), None); } } @@ -1373,24 +1373,24 @@ fn two_blocks_delayed_finalization_works() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); - assert_eq!(stream.next(), 
Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_charlie_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(d1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((d1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1472,9 +1472,9 @@ fn delayed_finalization_does_not_retract() { let mut stream = futures::executor::block_on_stream(from_alice_watcher); //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-2 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -1483,9 +1483,9 @@ fn delayed_finalization_does_not_retract() { //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); //phase-1 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1559,16 +1559,16 @@ fn best_block_after_finalization_does_not_retract() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } From 06a9f0a5da9681287f8a1c7b53497921238ece81 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 12 Oct 2022 15:01:54 +0200 Subject: [PATCH 66/75] Clarify the "direction" field of block requests (#12438) --- client/network/sync/src/schema/api.v1.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/client/network/sync/src/schema/api.v1.proto b/client/network/sync/src/schema/api.v1.proto index 203b157470a58..1490f61a41ddd 100644 --- a/client/network/sync/src/schema/api.v1.proto +++ b/client/network/sync/src/schema/api.v1.proto @@ -24,6 +24,7 @@ 
message BlockRequest { bytes number = 3; } // Sequence direction. + // If missing, should be interpreted as "Ascending". Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. uint32 max_blocks = 6; // optional From f8a0b7a9569f9554db157ec9cc6b6a08dadc6eb6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 12 Oct 2022 18:10:31 +0200 Subject: [PATCH 67/75] BlockId::Number refactor: trivial changes to BlockId::Hash (#12471) * Trivial BlockId::Number => Hash * missed BlockId::Hash added --- client/consensus/aura/src/lib.rs | 2 +- client/db/benches/state_access.rs | 2 +- test-utils/runtime/src/lib.rs | 6 +++--- utils/frame/benchmarking-cli/src/storage/cmd.rs | 2 +- utils/frame/benchmarking-cli/src/storage/read.rs | 2 +- utils/frame/benchmarking-cli/src/storage/write.rs | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index a0eed6e35310e..734cecca9b30b 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -769,7 +769,7 @@ mod tests { assert_eq!(client.chain_info().best_number, 0); assert_eq!( - authorities(&client, &BlockId::Number(0)).unwrap(), + authorities(&client, &BlockId::Hash(client.chain_info().best_hash)).unwrap(), vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 714dda82d61b7..4f4c10bcc8f53 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -84,7 +84,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(BlockId::Number(number - 1)).unwrap().storage_root( + let (state_root, tx) = db.state_at(BlockId::Hash(parent_hash)).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 3db0e5510057b..8bda4ea602428 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1340,7 +1340,7 @@ mod tests { .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .set_heap_pages(8) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger // than the heap. 
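Each hunk in this patch makes the same one-line substitution, so the equivalence is worth spelling out once (a minimal sketch, assuming a `client` that exposes the usual `chain_info()`):

    let info = client.chain_info();
    // Both address the same best block; the hash form is unambiguous across
    // forks and spares the backend a number-to-hash lookup.
    let by_number = BlockId::<Block>::Number(info.best_number);
    let by_hash = BlockId::<Block>::Hash(info.best_hash);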
@@ -1369,7 +1369,7 @@ mod tests { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); runtime_api.test_storage(&block_id).unwrap(); } @@ -1396,7 +1396,7 @@ mod tests { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); runtime_api.test_witness(&block_id, proof, root).unwrap(); } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 1d91e8f0b0517..a4c4188dfe073 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -191,7 +191,7 @@ impl StorageCmd { B: BlockT + Debug, BA: ClientBackend, { - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); let empty_prefix = StorageKey(Vec::new()); let mut keys = client.storage_keys(&block, &empty_prefix)?; let (mut rng, _) = new_rng(None); diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index cba318f87ea98..5e8a310ea5c5a 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -41,7 +41,7 @@ impl StorageCmd { <::Header as HeaderT>::Number: From, { let mut record = BenchRecord::default(); - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); info!("Preparing keys from block {}", block); // Load all keys and randomly shuffle them. diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 9a3821a7095f8..0ef2a2f9ae113 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -57,7 +57,7 @@ impl StorageCmd { // Store the time that it took to write each value. 
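	// The shape of that measurement, sketched with a hypothetical `write_one`
	// helper standing in for the real trie-commit plumbing (`record.append`
	// is likewise assumed here, not quoted from this file):
	//
	//     let start = std::time::Instant::now();
	//     write_one(&trie, &key, &new_value)?;
	//     record.append(new_value.len(), start.elapsed())?;
	//
	// i.e. one (value size, duration) sample is recorded per key written.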
let mut record = BenchRecord::default(); - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); let header = client.header(block)?.ok_or("Header not found")?; let original_root = *header.state_root(); let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); From b324e511771b4e2d809d8319b98f3e36083c9ee2 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 12 Oct 2022 12:32:10 -0400 Subject: [PATCH 68/75] Dont use benchmark range on constant functions (#12456) * dont use benchmark range on constant function * update weights * fix * new weights * Update frame/examples/basic/src/benchmarking.rs Co-authored-by: parity-processbot <> --- frame/examples/basic/src/benchmarking.rs | 17 +++--- frame/examples/basic/src/lib.rs | 2 +- frame/examples/basic/src/weights.rs | 78 +++++++++++------------- 3 files changed, 46 insertions(+), 51 deletions(-) diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 87d65a0bfa5b6..13f069c23e27b 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -34,25 +34,26 @@ use frame_system::RawOrigin; // Details on using the benchmarks macro can be seen at: // https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks benchmarks! { - // This will measure the execution time of `set_dummy` for b in [0..1000] range. + // This will measure the execution time of `set_dummy`. set_dummy_benchmark { - // This is the benchmark setup phase - let b in 0 .. 1000; - }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call + // This is the benchmark setup phase. + // `set_dummy` is a constant time function, hence we hard-code some random value here. + let value = 1000u32.into(); + }: set_dummy(RawOrigin::Root, value) // The execution phase is just running `set_dummy` extrinsic call verify { // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::dummy(), Some(b.into())) + assert_eq!(Pallet::::dummy(), Some(value)) } - // This will measure the execution time of `accumulate_dummy` for b in [0..1000] range. + // This will measure the execution time of `accumulate_dummy`. // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same // as the extrinsic call. `_(...)` is used to represent the extrinsic name. // The benchmark verification phase is omitted. accumulate_dummy { - let b in 0 .. 1000; + let value = 1000u32.into(); // The caller account is whitelisted for DB reads/write by the benchmarking macro. let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), b.into()) + }: _(RawOrigin::Signed(caller), value) // This will measure the execution time of sorting a vector. sort_vector { diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index f754348782cec..256529421caae 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -498,7 +498,7 @@ pub mod pallet { // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. 
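		// Because `accumulate_dummy` performs the same single read and write no
		// matter what `increase_by` is, its weight is a constant: the
		// `WeightInfo` function below takes no argument, and nothing derived
		// from the call's parameter may scale it (that is exactly what the old
		// `(*increase_by).saturated_into()` form got wrong).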
#[pallet::weight( - ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) + ::WeightInfo::accumulate_dummy() )] pub fn accumulate_dummy(origin: OriginFor, increase_by: T::Balance) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index 986648b4302bc..a69f0824eac11 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,34 +17,26 @@ //! Autogenerated weights for pallet_example_basic //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-10-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `Shawns-MacBook-Pro.local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: // ./target/release/substrate // benchmark -// --chain -// dev -// --execution -// wasm -// --wasm-execution -// compiled -// --pallet -// pallet_example_basic -// --extrinsic -// * -// --steps -// 100 -// --repeat -// 10 -// --raw -// --output -// ./ +// pallet +// --chain=dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_example_basic +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=./ // --template // ./.maintain/frame-weight-template.hbs - #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -54,48 +46,50 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. pub trait WeightInfo { - fn set_dummy_benchmark(b: u32, ) -> Weight; - fn accumulate_dummy(b: u32, ) -> Weight; + fn set_dummy_benchmark() -> Weight; + fn accumulate_dummy() -> Weight; fn sort_vector(x: u32, ) -> Weight; } /// Weights for pallet_example_basic using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as u64) - .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:0 w:1) + fn set_dummy_benchmark() -> Weight { + Weight::from_ref_time(19_000_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as u64) - .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:1 w:1) + fn accumulate_dummy() -> Weight { + Weight::from_ref_time(18_000_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } + /// The range of component `x` is `[0, 10000]`. 
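+	/// Read as a line through the origin: about 520 ref-time units per
+	/// element, so `x = 10_000` costs roughly 5_200_000 (a back-of-envelope
+	/// reading of the generated constants, not an extra guarantee).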
fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as u64) - .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:0 w:1) + fn set_dummy_benchmark() -> Weight { + Weight::from_ref_time(19_000_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as u64) - .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:1 w:1) + fn accumulate_dummy() -> Weight { + Weight::from_ref_time(18_000_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } + /// The range of component `x` is `[0, 10000]`. fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) } } From 94941a892e9c625124afa8f3b28aedc5a6274059 Mon Sep 17 00:00:00 2001 From: Dmitrii Markin Date: Thu, 13 Oct 2022 12:24:31 +0300 Subject: [PATCH 69/75] Punish peers for duplicate GRANDPA neighbor messages (#12462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Decrease peer reputation for duplicate GRANDPA neighbor messages. * Fix comparison * Fix update_peer_state() validity condition * Add negative test * Rework update_peer_state() validity condition, add tests * update_peer_state() validity condition: invert comparison * Split InvalidViewChange and DuplicateNeighborMessage misbehaviors * Enforce rate-limiting of duplicate GRANDPA neighbor packets * Update client/finality-grandpa/src/communication/gossip.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Make rolling clock back in a test safer Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- .../src/communication/gossip.rs | 151 +++++++++++++----- .../finality-grandpa/src/communication/mod.rs | 8 +- .../src/communication/periodic.rs | 17 +- 3 files changed, 124 insertions(+), 52 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 1ba5e0da33c96..218b4b668c10f 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -35,7 +35,8 @@ //! impolite to send messages about r+1 or later. "future-round" messages can //! be dropped and ignored. //! -//! It is impolite to send a neighbor packet which moves backwards in protocol state. +//! It is impolite to send a neighbor packet which moves backwards or does not progress +//! protocol state. //! //! This is beneficial if it conveys some progress in the protocol state of the peer. //! 
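//! Concretely, the checks introduced below boil down to the following
//! (an informal sketch, not the exact code): for a stored peer view
//! `(set_id, round, commit_height)`, an incoming packet `(s, r, c)` is an
//! invalid view change if any of the three moves backwards, and a punishable
//! duplicate if `(s, r, c)` equals the stored view and arrives within half
//! the rebroadcast period of the peer's previous update.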
@@ -97,7 +98,7 @@ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use super::{benefit, cost, Round, SetId}; +use super::{benefit, cost, Round, SetId, NEIGHBOR_REBROADCAST_PERIOD}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; use std::{ @@ -148,14 +149,15 @@ enum Consider { /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. - last_commit: Option, // commit-finalized block height, if any. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. + last_commit: Option, // commit-finalized block height, if any. + last_update: Option, // last time we heard from peer, used for spamming detection. } impl Default for View { fn default() -> Self { - View { round: Round(1), set_id: SetId(0), last_commit: None } + View { round: Round(1), set_id: SetId(0), last_commit: None, last_update: None } } } @@ -225,7 +227,12 @@ impl LocalView { /// Converts the local view to a `View` discarding round and set id /// information about the last commit. fn as_view(&self) -> View<&N> { - View { round: self.round, set_id: self.set_id, last_commit: self.last_commit_height() } + View { + round: self.round, + set_id: self.set_id, + last_commit: self.last_commit_height(), + last_update: None, + } } /// Update the set ID. implies a reset to round 1. @@ -417,6 +424,8 @@ pub(super) struct FullCatchUpMessage { pub(super) enum Misbehavior { // invalid neighbor message, considering the last one. InvalidViewChange, + // duplicate neighbor message. + DuplicateNeighborMessage, // could not decode neighbor message. bytes-length of the packet. UndecodablePacket(i32), // Bad catch up message (invalid signatures). @@ -438,6 +447,7 @@ impl Misbehavior { match *self { InvalidViewChange => cost::INVALID_VIEW_CHANGE, + DuplicateNeighborMessage => cost::DUPLICATE_NEIGHBOR_MESSAGE, UndecodablePacket(bytes) => ReputationChange::new( bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), "Grandpa: Bad packet", @@ -488,20 +498,22 @@ struct Peers { second_stage_peers: HashSet, /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. lucky_light_peers: HashSet, + /// Neighbor packet rebroadcast period --- we reduce the reputation of peers sending duplicate + /// packets too often. 
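+	/// "Too often" means an identical packet arriving within half of this
+	/// period of the peer's previous update (one minute with the production
+	/// value of two minutes); slower rebroadcasts are accepted.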
+ neighbor_rebroadcast_period: Duration, } -impl Default for Peers { - fn default() -> Self { +impl Peers { + fn new(neighbor_rebroadcast_period: Duration) -> Self { Peers { inner: Default::default(), first_stage_peers: Default::default(), second_stage_peers: Default::default(), lucky_light_peers: Default::default(), + neighbor_rebroadcast_period, } } -} -impl Peers { fn new_peer(&mut self, who: PeerId, role: ObservedRole) { match role { ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { @@ -547,10 +559,23 @@ impl Peers { return Err(Misbehavior::InvalidViewChange) } + let now = Instant::now(); + let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == + (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); + + if duplicate_packet { + if let Some(last_update) = peer.view.last_update { + if now < last_update + self.neighbor_rebroadcast_period / 2 { + return Err(Misbehavior::DuplicateNeighborMessage) + } + } + } + peer.view = View { round: update.round, set_id: update.set_id, last_commit: Some(update.commit_finalized_height), + last_update: Some(now), }; trace!(target: "afg", "Peer {} updated view. Now at {:?}, {:?}", @@ -748,7 +773,7 @@ impl Inner { Inner { local_view: None, - peers: Peers::default(), + peers: Peers::new(NEIGHBOR_REBROADCAST_PERIOD), live_topics: KeepTopics::new(), next_rebroadcast: Instant::now() + REBROADCAST_AFTER, authorities: Vec::new(), @@ -758,13 +783,16 @@ impl Inner { } } - /// Note a round in the current set has started. + /// Note a round in the current set has started. Does nothing if the last + /// call to the function was with the same `round`. fn note_round(&mut self, round: Round) -> MaybeMessage { { let local_view = match self.local_view { None => return None, Some(ref mut v) => if v.round == round { + // Do not send neighbor packets out if `round` has not changed --- + // such behavior is punishable. return None } else { v @@ -803,6 +831,8 @@ impl Inner { ); self.authorities = authorities; } + // Do not send neighbor packets out if the `set_id` has not changed --- + // such behavior is punishable. return None } else { v @@ -816,7 +846,9 @@ impl Inner { self.multicast_neighbor_packet() } - /// Note that we've imported a commit finalizing a given block. + /// Note that we've imported a commit finalizing a given block. Does nothing if the last + /// call to the function was with the same or higher `finalized` number. + /// `set_id` & `round` are the ones the commit message is from. fn note_commit_finalized( &mut self, round: Round, @@ -1357,6 +1389,8 @@ impl GossipValidator { } /// Note that we've imported a commit finalizing a given block. + /// `set_id` & `round` are the ones the commit message is from and not necessarily + /// the latest set ID & round started. 
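+	/// (They may lag the local view when the commit being noted comes from an
+	/// earlier round or set.)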
pub(super) fn note_commit_finalized( &self, round: Round, @@ -1647,11 +1681,12 @@ pub(super) struct PeerReport { #[cfg(test)] mod tests { - use super::{environment::SharedVoterSetState, *}; + use super::{super::NEIGHBOR_REBROADCAST_PERIOD, environment::SharedVoterSetState, *}; use crate::communication; use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; use sp_core::{crypto::UncheckedFrom, H256}; + use std::time::Instant; use substrate_test_runtime_client::runtime::{Block, Header}; // some random config (not really needed) @@ -1684,7 +1719,12 @@ mod tests { #[test] fn view_vote_rules() { - let view = View { round: Round(100), set_id: SetId(1), last_commit: Some(1000u64) }; + let view = View { + round: Round(100), + set_id: SetId(1), + last_commit: Some(1000u64), + last_update: None, + }; assert_eq!(view.consider_vote(Round(98), SetId(1)), Consider::RejectPast); assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); @@ -1701,7 +1741,12 @@ mod tests { #[test] fn view_global_message_rules() { - let view = View { round: Round(100), set_id: SetId(2), last_commit: Some(1000u64) }; + let view = View { + round: Round(100), + set_id: SetId(2), + last_commit: Some(1000u64), + last_update: None, + }; assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); @@ -1719,7 +1764,7 @@ mod tests { #[test] fn unknown_peer_cannot_be_updated() { - let mut peers = Peers::default(); + let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); let update = @@ -1750,27 +1795,35 @@ mod tests { let update4 = NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; - let mut peers = Peers::default(); + // Use shorter rebroadcast period to safely roll the clock back in the last test + // and don't hit the system boot time on systems with unsigned time. + const SHORT_NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(1); + let mut peers = Peers::new(SHORT_NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); peers.new_peer(id, ObservedRole::Authority); - let mut check_update = move |update: NeighborPacket<_>| { + let check_update = |peers: &mut Peers<_>, update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); assert_eq!(view.round, update.round); assert_eq!(view.set_id, update.set_id); assert_eq!(view.last_commit, Some(update.commit_finalized_height)); }; - check_update(update1); - check_update(update2); - check_update(update3); - check_update(update4); + check_update(&mut peers, update1); + check_update(&mut peers, update2); + check_update(&mut peers, update3); + check_update(&mut peers, update4.clone()); + + // Allow duplicate neighbor packets if enough time has passed. 
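+		// (Backdating `last_update` by a full SHORT_NEIGHBOR_REBROADCAST_PERIOD
+		// clears the half-period grace check, so the same packet must now be
+		// accepted rather than reported as `DuplicateNeighborMessage`.)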
+ peers.inner.get_mut(&id).unwrap().view.last_update = + Some(Instant::now() - SHORT_NEIGHBOR_REBROADCAST_PERIOD); + check_update(&mut peers, update4); } #[test] fn invalid_view_change() { - let mut peers = Peers::default(); + let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); peers.new_peer(id, ObservedRole::Authority); @@ -1783,29 +1836,41 @@ mod tests { .unwrap() .unwrap(); - let mut check_update = move |update: NeighborPacket<_>| { + let mut check_update = move |update: NeighborPacket<_>, misbehavior| { let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); - assert_eq!(err, Misbehavior::InvalidViewChange); + assert_eq!(err, misbehavior); }; // round moves backwards. - check_update(NeighborPacket { - round: Round(9), - set_id: SetId(10), - commit_finalized_height: 10, - }); - // commit finalized height moves backwards. - check_update(NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 9, - }); + check_update( + NeighborPacket { round: Round(9), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); // set ID moves backwards. - check_update(NeighborPacket { - round: Round(10), - set_id: SetId(9), - commit_finalized_height: 10, - }); + check_update( + NeighborPacket { round: Round(10), set_id: SetId(9), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // duplicate packet without grace period. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::DuplicateNeighborMessage, + ); + // commit finalized height moves backwards while round moves forward. + check_update( + NeighborPacket { round: Round(11), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards while set ID moves forward. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(11), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); } #[test] diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 12cb2601f4c26..75a7697812c6c 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -37,6 +37,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, + time::Duration, }; use finality_grandpa::{ @@ -68,6 +69,9 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; +// How often to rebroadcast neighbor packets, in cases where no new packets are created. 
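+// It also bounds duplicate-packet tolerance in `gossip`: an identical
+// neighbor packet received within half of this period is reported as
+// misbehavior.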
+pub(crate) const NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(2 * 60); + pub mod grandpa_protocol_name { use sc_chain_spec::ChainSpec; use sc_network_common::protocol::ProtocolName; @@ -103,6 +107,8 @@ mod cost { pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); + pub(super) const DUPLICATE_NEIGHBOR_MESSAGE: Rep = + Rep::new(-500, "Grandpa: Duplicate neighbor message without grace period"); pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; pub(super) const PER_BLOCK_LOADED: i32 = -10; @@ -279,7 +285,7 @@ impl> NetworkBridge { } let (neighbor_packet_worker, neighbor_packet_sender) = - periodic::NeighborPacketWorker::new(); + periodic::NeighborPacketWorker::new(NEIGHBOR_REBROADCAST_PERIOD); NetworkBridge { service, diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index e6d63beafc362..c001796b5ca5d 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -32,9 +32,6 @@ use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor}; -// How often to rebroadcast, in cases where no new packets are created. -const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); - /// A sender used to send neighbor packets to a background job. #[derive(Clone)] pub(super) struct NeighborPacketSender( @@ -60,6 +57,7 @@ impl NeighborPacketSender { /// implementation). Periodically it sends out the last packet in cases where no new ones arrive. pub(super) struct NeighborPacketWorker { last: Option<(Vec, NeighborPacket>)>, + rebroadcast_period: Duration, delay: Delay, rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, } @@ -67,13 +65,16 @@ pub(super) struct NeighborPacketWorker { impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new() -> (Self, NeighborPacketSender) { + pub(super) fn new(rebroadcast_period: Duration) -> (Self, NeighborPacketSender) { let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( "mpsc_grandpa_neighbor_packet_worker", ); - let delay = Delay::new(REBROADCAST_AFTER); + let delay = Delay::new(rebroadcast_period); - (NeighborPacketWorker { last: None, delay, rx }, NeighborPacketSender(tx)) + ( + NeighborPacketWorker { last: None, rebroadcast_period, delay, rx }, + NeighborPacketSender(tx), + ) } } @@ -85,7 +86,7 @@ impl Stream for NeighborPacketWorker { match this.rx.poll_next_unpin(cx) { Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some((to, packet))) => { - this.delay.reset(REBROADCAST_AFTER); + this.delay.reset(this.rebroadcast_period); this.last = Some((to.clone(), packet.clone())); return Poll::Ready(Some((to, GossipMessage::::from(packet)))) @@ -98,7 +99,7 @@ impl Stream for NeighborPacketWorker { // Getting this far here implies that the timer fired. - this.delay.reset(REBROADCAST_AFTER); + this.delay.reset(this.rebroadcast_period); // Make sure the underlying task is scheduled for wake-up. 
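		// (Resetting with the injected `rebroadcast_period` rather than a
		// module-level constant keeps the interval a caller decision;
		// `NetworkBridge` passes `NEIGHBOR_REBROADCAST_PERIOD` here.)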
		//

From 983b6b0e5d93a3f7d99d8b3d3a8bb398af3ec045 Mon Sep 17 00:00:00 2001
From: Aaro Altonen <48052676+altonen@users.noreply.github.com>
Date: Thu, 13 Oct 2022 12:37:09 +0300
Subject: [PATCH 70/75] Introduce mockable `ChainSync` object for testing
 (#12480)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Introduce mockable `ChainSync` object for testing

`mockall` allows mocking `ChainSync` and verifying that the calls made
to `ChainSync` are executed at all, that they're executed in the
correct order, and that they're made with the correct parameters. This
makes it possible to verify, e.g., that delegating calls directly to
`ChainSync` from `NetworkService` still calls the correct functions
with the correct arguments even if the `Protocol` middleman is removed.

* Add Cargo.lock

* Fix tests

* Update client/network/Cargo.toml

Co-authored-by: Bastian Köcher

* Update Cargo.lock

* Fix clippy and documentation

Co-authored-by: Bastian Köcher
Co-authored-by: parity-processbot <>
---
 Cargo.lock                                    |  58 +++
 client/network/common/src/sync.rs             |  14 +-
 client/network/src/protocol.rs                |   2 +-
 client/network/src/service.rs                 |   2 +
 client/network/src/service/chainsync_tests.rs | 339 ++++++++++++++++++
 client/network/sync/Cargo.toml                |   1 +
 client/network/sync/src/lib.rs                |  35 +-
 client/network/sync/src/mock.rs               | 118 ++++++
 8 files changed, 546 insertions(+), 23 deletions(-)
 create mode 100644 client/network/src/service/chainsync_tests.rs
 create mode 100644 client/network/sync/src/mock.rs

diff --git a/Cargo.lock b/Cargo.lock
index d29023f330ce1..062195c70f8ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1717,6 +1717,12 @@
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"

+[[package]]
+name = "downcast"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1"
+
 [[package]]
 name = "downcast-rs"
 version = "1.2.0"
@@ -2093,6 +2099,15 @@
 dependencies = [
  "miniz_oxide",
 ]

+[[package]]
+name = "float-cmp"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -2116,6 +2131,12 @@
 dependencies = [
  "percent-encoding",
 ]

+[[package]]
+name = "fragile"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab"
+
 [[package]]
 name = "frame-benchmarking"
 version = "4.0.0-dev"
@@ -4395,6 +4416,33 @@
 dependencies = [
  "windows-sys 0.36.1",
 ]

+[[package]]
+name = "mockall"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709"
+dependencies = [
+ "cfg-if 1.0.0",
+ "downcast",
+ "fragile",
+ "lazy_static",
+ "mockall_derive",
+ "predicates",
+ "predicates-tree",
+]
+
+[[package]]
+name = "mockall_derive"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f"
+dependencies = [
+ "cfg-if 1.0.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "more-asserts"
 version = "0.2.1"
@@ -4970,6 +5018,12 @@
 dependencies = [
  "version_check",
 ]

+[[package]]
+name = "normalize-line-endings"
+version
= "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-bigint" version = "0.2.6" @@ -6942,8 +6996,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c143348f141cc87aab5b950021bac6145d0e5ae754b0591de23244cee42c9308" dependencies = [ "difflib", + "float-cmp", "itertools", + "normalize-line-endings", "predicates-core", + "regex", ] [[package]] @@ -8586,6 +8643,7 @@ dependencies = [ "libp2p", "log", "lru", + "mockall", "parity-scale-codec", "prost 0.11.0", "prost-build 0.11.1", diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 020b2c9efa4c7..d3603c6792c84 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -269,12 +269,14 @@ pub trait ChainSync: Send { ); /// Get an iterator over all scheduled justification requests. - fn justification_requests( - &mut self, - ) -> Box)> + '_>; + fn justification_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; /// Get an iterator over all block requests of all peers. - fn block_requests(&mut self) -> Box)> + '_>; + fn block_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; /// Get a state request, if any. fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; @@ -359,9 +361,9 @@ pub trait ChainSync: Send { /// /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to /// import passed header (call `on_block_data`). The network request isn't sent in this case. - fn poll_block_announce_validation( + fn poll_block_announce_validation<'a>( &mut self, - cx: &mut std::task::Context, + cx: &mut std::task::Context<'a>, ) -> Poll>; /// Call when a peer has disconnected. diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 325e044527efa..c3def8adc6cfe 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1440,7 +1440,7 @@ where for (id, request) in self .chain_sync .block_requests() - .map(|(peer_id, request)| (*peer_id, request)) + .map(|(peer_id, request)| (peer_id, request)) .collect::>() { let event = diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 28e479b702779..25916041285a3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -92,6 +92,8 @@ use std::{ pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; +#[cfg(test)] +mod chainsync_tests; mod metrics; mod out_events; #[cfg(test)] diff --git a/client/network/src/service/chainsync_tests.rs b/client/network/src/service/chainsync_tests.rs new file mode 100644 index 0000000000000..ca44c65d267f4 --- /dev/null +++ b/client/network/src/service/chainsync_tests.rs @@ -0,0 +1,339 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, NetworkWorker}; + +use futures::prelude::*; +use libp2p::PeerId; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{BlockBackend, HeaderBackend}; +use sc_consensus::JustificationSyncLink; +use sc_network_common::{ + config::{ + NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + TransportConfig, + }, + protocol::role::Roles, + service::NetworkSyncForkRequest, + sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT, SyncState, SyncStatus}, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, mock::MockChainSync, + state_request_handler::StateRequestHandler, +}; +use sp_core::H256; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as _, Zero}, +}; +use std::{iter, sync::Arc, task::Poll}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkWorker = NetworkWorker< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, + substrate_test_runtime_client::TestClient, +>; + +const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; +const PROTOCOL_NAME: &str = "/foo"; + +fn make_network( + chain_sync: Box>, + client: Arc, +) -> (TestNetworkWorker, Arc) { + let network_config = config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + handshake: None, + set_config: Default::default(), + }], + listen_addresses: vec![config::build_multiaddr![Memory(rand::random::())]], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }; + + #[derive(Clone)] + struct PassThroughVerifier(bool); + + #[async_trait::async_trait] + impl sc_consensus::Verifier for PassThroughVerifier { + async fn verify( + &mut self, + mut block: sc_consensus::BlockImportParams, + ) -> Result< + ( + sc_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = block + .header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) + }) + }) + .map(|blob| { + vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] + }); + + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) + } + } + + let import_queue = Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let protocol_id = ProtocolId::from("/test-protocol-name"); + + let fork_id = Some(String::from("test-fork-id")); + + let block_request_protocol_config = { + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = + 
			LightClientRequestHandler::new(&protocol_id, None, client.clone());
+		async_std::task::spawn(handler.run().boxed());
+		protocol_config
+	};
+
+	let block_announce_config = NonDefaultSetConfig {
+		notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(),
+		fallback_names: vec![],
+		max_notification_size: 1024 * 1024,
+		handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<
+			substrate_test_runtime_client::runtime::Block,
+		>::build(
+			Roles::from(&config::Role::Full),
+			client.info().best_number,
+			client.info().best_hash,
+			client
+				.block_hash(Zero::zero())
+				.ok()
+				.flatten()
+				.expect("Genesis block exists; qed"),
+		))),
+		set_config: SetConfig {
+			in_peers: 0,
+			out_peers: 0,
+			reserved_nodes: Vec::new(),
+			non_reserved_mode: NonReservedPeerMode::Deny,
+		},
+	};
+
+	let worker = NetworkWorker::new(config::Params {
+		block_announce_config,
+		role: config::Role::Full,
+		executor: None,
+		network_config,
+		chain: client.clone(),
+		protocol_id,
+		fork_id,
+		import_queue,
+		chain_sync,
+		metrics_registry: None,
+		block_request_protocol_config,
+		state_request_protocol_config,
+		light_client_request_protocol_config,
+		warp_sync_protocol_config: None,
+		request_response_protocol_configs: Vec::new(),
+	})
+	.unwrap();
+
+	(worker, client)
+}
+
+fn set_default_expectations_no_peers(
+	chain_sync: &mut MockChainSync<substrate_test_runtime_client::runtime::Block>,
+) {
+	chain_sync.expect_block_requests().returning(|| Box::new(iter::empty()));
+	chain_sync.expect_state_request().returning(|| None);
+	chain_sync.expect_justification_requests().returning(|| Box::new(iter::empty()));
+	chain_sync.expect_warp_sync_request().returning(|| None);
+	chain_sync.expect_poll_block_announce_validation().returning(|_| Poll::Pending);
+	chain_sync.expect_status().returning(|| SyncStatus {
+		state: SyncState::Idle,
+		best_seen_block: None,
+		num_peers: 0u32,
+		queued_blocks: 0u32,
+		state_sync: None,
+		warp_sync: None,
+	});
+}
+
+#[async_std::test]
+async fn normal_network_poll_no_peers() {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+	let mut chain_sync =
+		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
+	set_default_expectations_no_peers(&mut chain_sync);
+
+	let (mut network, _) = make_network(chain_sync, client);
+
+	// poll the network once
+	futures::future::poll_fn(|cx| {
+		let _ = network.poll_unpin(cx);
+		Poll::Ready(())
+	})
+	.await;
+}
+
+#[async_std::test]
+async fn request_justification() {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+	let mut chain_sync =
+		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
+
+	let hash = H256::random();
+	let number = 1337u64;
+
+	chain_sync
+		.expect_request_justification()
+		.withf(move |in_hash, in_number| &hash == in_hash && &number == in_number)
+		.once()
+		.returning(|_, _| ());
+
+	set_default_expectations_no_peers(&mut chain_sync);
+	let (mut network, _) = make_network(chain_sync, client);
+
+	// send "request justification" message and poll the network
+	network.service().request_justification(&hash, number);
+
+	futures::future::poll_fn(|cx| {
+		let _ = network.poll_unpin(cx);
+		Poll::Ready(())
+	})
+	.await;
+}
+
+#[async_std::test]
+async fn clear_justification_requests() {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+	let mut chain_sync =
+		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
+
+	chain_sync.expect_clear_justification_requests().once().returning(|| ());
+
+	set_default_expectations_no_peers(&mut chain_sync);
+	let (mut network, _) = make_network(chain_sync, client);
+
+	// send "clear justification requests" message and poll the network
+	network.service().clear_justification_requests();
+
+	futures::future::poll_fn(|cx| {
+		let _ = network.poll_unpin(cx);
+		Poll::Ready(())
+	})
+	.await;
+}
+
+#[async_std::test]
+async fn set_sync_fork_request() {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+	let mut chain_sync =
+		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
+
+	let hash = H256::random();
+	let number = 1337u64;
+	let peers = (0..3).map(|_| PeerId::random()).collect::<Vec<_>>();
+	let copy_peers = peers.clone();
+
+	chain_sync
+		.expect_set_sync_fork_request()
+		.withf(move |in_peers, in_hash, in_number| {
+			&peers == in_peers && &hash == in_hash && &number == in_number
+		})
+		.once()
+		.returning(|_, _, _| ());
+
+	set_default_expectations_no_peers(&mut chain_sync);
+	let (mut network, _) = make_network(chain_sync, client);
+
+	// send "set sync fork request" message and poll the network
+	network.service().set_sync_fork_request(copy_peers, hash, number);
+
+	futures::future::poll_fn(|cx| {
+		let _ = network.poll_unpin(cx);
+		Poll::Ready(())
+	})
+	.await;
+}
+
+#[async_std::test]
+async fn on_block_finalized() {
+	let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+	let mut chain_sync =
+		Box::new(MockChainSync::<substrate_test_runtime_client::runtime::Block>::new());
+
+	let at = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap().hash();
+	let block = client
+		.new_block_at(&BlockId::Hash(at), Default::default(), false)
+		.unwrap()
+		.build()
+		.unwrap()
+		.block;
+	let header = block.header.clone();
+	let block_number = *header.number();
+	let hash = block.hash();
+
+	chain_sync
+		.expect_on_block_finalized()
+		.withf(move |in_hash, in_number| &hash == in_hash && &block_number == in_number)
+		.once()
+		.returning(|_, _| ());
+
+	set_default_expectations_no_peers(&mut chain_sync);
+	let (mut network, _) = make_network(chain_sync, client);
+
+	// send "block finalized" notification and poll the network
+	network.on_block_finalized(hash, header);
+
+	futures::future::poll_fn(|cx| {
+		let _ = network.poll_unpin(cx);
+		Poll::Ready(())
+	})
+	.await;
+}
diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml
index 24d418f7233d7..9d032f5cca96c 100644
--- a/client/network/sync/Cargo.toml
+++ b/client/network/sync/Cargo.toml
@@ -25,6 +25,7 @@
 futures = "0.3.21"
 libp2p = "0.46.1"
 log = "0.4.17"
 lru = "0.7.5"
+mockall = "0.11.2"
 prost = "0.11"
 smallvec = "1.8.0"
 thiserror = "1.0"
diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs
index 280e530eca9a9..84998c747b3cc 100644
--- a/client/network/sync/src/lib.rs
+++ b/client/network/sync/src/lib.rs
@@ -30,6 +30,7 @@
 pub mod block_request_handler;
 pub mod blocks;
+pub mod mock;
 mod schema;
 pub mod state;
 pub mod state_request_handler;
@@ -643,9 +644,9 @@ where
 			.extend(peers);
 	}
 
-	fn justification_requests(
-		&mut self,
-	) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<B>)> + '_> {
+	fn justification_requests<'a>(
+		&'a mut self,
+	) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<B>)> + 'a> {
 		let peers = &mut self.peers;
 		let mut matcher = self.extra_justifications.matcher();
 		Box::new(std::iter::from_fn(move || {
@@ -670,7 +671,9 @@
 		}))
 	}
 
-	fn block_requests(&mut self) -> Box<dyn Iterator<Item = (&PeerId, BlockRequest<B>)> + '_> {
+	fn block_requests<'a>(
+		&'a mut self,
+	) -> Box<dyn Iterator<Item = (PeerId, BlockRequest<B>)> + 'a> {
 		if self.mode == SyncMode::Warp {
 			return Box::new(std::iter::once(self.warp_target_block_request()).flatten())
 		}
@@ -695,8 +698,8 @@ where
 		let allowed_requests = self.allowed_requests.take();
 		let max_parallel = if major_sync {
1 } else { self.max_parallel_downloads }; let gap_sync = &mut self.gap_sync; - let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(id) { + let iter = self.peers.iter_mut().filter_map(move |(&id, peer)| { + if !peer.state.is_available() || !allowed_requests.contains(&id) { return None } @@ -725,7 +728,7 @@ where }; Some((id, ancestry_request::(current))) } else if let Some((range, req)) = peer_block_request( - id, + &id, peer, blocks, attrs, @@ -744,7 +747,7 @@ where ); Some((id, req)) } else if let Some((hash, req)) = - fork_sync_request(id, fork_targets, best_queued, last_finalized, attrs, |hash| { + fork_sync_request(&id, fork_targets, best_queued, last_finalized, attrs, |hash| { if queue.contains(hash) { BlockStatus::Queued } else { @@ -756,7 +759,7 @@ where Some((id, req)) } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { peer_gap_block_request( - id, + &id, peer, &mut sync.blocks, attrs, @@ -2216,7 +2219,7 @@ where } /// Generate block request for downloading of the target block body during warp sync. - fn warp_target_block_request(&mut self) -> Option<(&PeerId, BlockRequest)> { + fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -2234,7 +2237,7 @@ where trace!(target: "sync", "New warp target block request for {}", id); peer.state = PeerSyncState::DownloadingWarpTargetBlock; self.allowed_requests.clear(); - return Some((id, request)) + return Some((*id, request)) } } } @@ -2482,7 +2485,7 @@ fn fork_sync_request( true }); for (hash, r) in targets { - if !r.peers.contains(id) { + if !r.peers.contains(&id) { continue } // Download the fork only if it is behind or not too far ahead our tip of the chain @@ -2740,7 +2743,7 @@ mod test { // we wil send block requests to these peers // for these blocks we don't know about - assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 })); + assert!(sync.block_requests().all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // add a new peer at a known block sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); @@ -2835,7 +2838,7 @@ mod test { log::trace!(target: "sync", "Requests: {:?}", requests); assert_eq!(1, requests.len()); - assert_eq!(peer, requests[0].0); + assert_eq!(*peer, requests[0].0); let request = requests[0].1.clone(); @@ -3065,9 +3068,9 @@ mod test { send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); let (peer1_req, peer2_req) = sync.block_requests().fold((None, None), |res, req| { - if req.0 == &peer_id1 { + if req.0 == peer_id1 { (Some(req.1), res.1) - } else if req.0 == &peer_id2 { + } else if req.0 == peer_id2 { (res.0, Some(req.1)) } else { panic!("Unexpected req: {:?}", req) diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs new file mode 100644 index 0000000000000..2a3b059f735b2 --- /dev/null +++ b/client/network/sync/src/mock.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Contains a mock implementation of `ChainSync` that can be used +//! for testing calls made to `ChainSync`. + +use futures::task::Poll; +use libp2p::PeerId; +use sc_consensus::{BlockImportError, BlockImportStatus}; +use sc_network_common::sync::{ + message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, + warp::{EncodedProof, WarpProofRequest}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncStatus, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +mockall::mock! { + pub ChainSync {} + + impl ChainSyncT for ChainSync { + fn peer_info(&self, who: &PeerId) -> Option>; + fn status(&self) -> SyncStatus; + fn num_sync_requests(&self) -> usize; + fn num_downloaded_blocks(&self) -> usize; + fn num_peers(&self) -> usize; + fn new_peer( + &mut self, + who: PeerId, + best_hash: Block::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer>; + fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); + fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); + fn clear_justification_requests(&mut self); + fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &Block::Hash, + number: NumberFor, + ); + fn justification_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; + fn block_requests<'a>(&'a mut self) -> Box)> + 'a>; + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)>; + fn on_block_data( + &mut self, + who: &PeerId, + request: Option>, + response: BlockResponse, + ) -> Result, BadPeer>; + fn on_state_data( + &mut self, + who: &PeerId, + response: OpaqueStateResponse, + ) -> Result, BadPeer>; + fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer>; + fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer>; + fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, Block::Hash)>, + ) -> Box), BadPeer>>>; + fn on_justification_import( + &mut self, + hash: Block::Hash, + number: NumberFor, + success: bool, + ); + fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); + fn push_block_announce_validation( + &mut self, + who: PeerId, + hash: Block::Hash, + announce: BlockAnnounce, + is_best: bool, + ); + fn poll_block_announce_validation<'a>( + &mut self, + cx: &mut std::task::Context<'a>, + ) -> Poll>; + fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn metrics(&self) -> Metrics; + fn create_opaque_block_request(&self, request: &BlockRequest) -> OpaqueBlockRequest; + fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String>; + fn decode_block_response(&self, response: &[u8]) -> Result; + fn block_response_into_blocks( + &self, + request: &BlockRequest, + response: OpaqueBlockResponse, + ) -> Result>, String>; + fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String>; + fn 
decode_state_response(&self, response: &[u8]) -> Result; + } +} From 1f39c9029eb83e9432d86877f0694f643b7dd968 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Thu, 13 Oct 2022 12:13:56 +0200 Subject: [PATCH 71/75] pallet-mmr: RPC API and Runtime API work with block numbers (#12345) * pallet-mmr: RPC API works with block_numbers * fixes * update rpc * fmt * final touches in the rpc * temporary fix * fix * fmt * docs * Update lib.rs * use NumberFor * validate input * update runtime * convert block_number to u64 * small edit * update runtime api * test fix * runtime fix * update test function * fmt * fix nits * remove block_num_to_leaf_index from runtime api * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Robert Hambrock * fix tests * get the code to compile after merge * get the tests to compile * fix in tests? * fix test * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * Update primitives/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * fix errors & nits * change block_num_to_leaf_index * don't make any assumptions * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * fix * small fix * use best_known_block_number * best_known_block_number instead of leaves_count * more readable? * remove warning * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Robert Hambrock * simplify * update docs * nits * fmt & fix * merge fixes * fix * small fix * docs & nit fixes * Nit fixes * remove leaf_indices_to_block_numbers() * fmt Co-authored-by: Robert Hambrock Co-authored-by: Adrian Catangiu --- bin/node/rpc/src/lib.rs | 6 +- bin/node/runtime/src/lib.rs | 20 +++-- client/beefy/src/lib.rs | 4 +- client/beefy/src/tests.rs | 16 ++-- client/beefy/src/worker.rs | 2 +- frame/merkle-mountain-range/rpc/src/lib.rs | 59 +++++++-------- frame/merkle-mountain-range/src/lib.rs | 56 +++++++++++--- frame/merkle-mountain-range/src/tests.rs | 81 +++++++++++---------- primitives/beefy/src/mmr.rs | 6 +- primitives/merkle-mountain-range/src/lib.rs | 23 +++--- 10 files changed, 160 insertions(+), 113 deletions(-) diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 94e01619c6e63..8596fe23321ba 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -108,7 +108,11 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, + C::Api: pallet_mmr_rpc::MmrRuntimeApi< + Block, + ::Hash, + BlockNumber, + >, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 142173621036d..f137b36eff036 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2016,11 +2016,15 @@ impl_runtime_apis! 
{ } } - impl pallet_mmr::primitives::MmrApi for Runtime { - fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) + impl pallet_mmr::primitives::MmrApi< + Block, + mmr::Hash, + BlockNumber, + > for Runtime { + fn generate_proof(block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { - Mmr::generate_batch_proof(vec![leaf_index]).and_then(|(leaves, proof)| + Mmr::generate_batch_proof(vec![block_number]).and_then(|(leaves, proof)| Ok(( mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), mmr::BatchProof::into_single_leaf_proof(proof)? @@ -2052,9 +2056,9 @@ impl_runtime_apis! { } fn generate_batch_proof( - leaf_indices: Vec, + block_numbers: Vec, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_batch_proof(leaf_indices).map(|(leaves, proof)| { + Mmr::generate_batch_proof(block_numbers).map(|(leaves, proof)| { ( leaves .into_iter() @@ -2066,10 +2070,10 @@ impl_runtime_apis! { } fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: pallet_mmr::primitives::LeafIndex, + block_numbers: Vec, + best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + Mmr::generate_historical_batch_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( leaves diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 1c61cac072207..441f6e4248117 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -24,7 +24,7 @@ use sc_consensus::BlockImport; use sc_network::ProtocolName; use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; -use sp_api::ProvideRuntimeApi; +use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; @@ -200,7 +200,7 @@ where C: Client + BlockBackend, P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, + R::Api: BeefyApi + MmrApi>, N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { let BeefyParams { diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 24cf89acd5734..89be1cac4f886 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -43,9 +43,7 @@ use beefy_primitives::{ KEY_TYPE as BeefyKeyType, }; use sc_network::{config::RequestResponseConfig, ProtocolName}; -use sp_mmr_primitives::{ - BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, -}; +use sp_mmr_primitives::{BatchProof, EncodableOpaqueLeaf, Error as MmrError, MmrApi, Proof}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; @@ -247,8 +245,8 @@ macro_rules! create_test_api { } } - impl MmrApi for RuntimeApi { - fn generate_proof(_leaf_index: LeafIndex) + impl MmrApi> for RuntimeApi { + fn generate_proof(_block_number: u64) -> Result<(EncodableOpaqueLeaf, Proof), MmrError> { unimplemented!() } @@ -270,13 +268,13 @@ macro_rules! 
create_test_api { Ok($mmr_root) } - fn generate_batch_proof(_leaf_indices: Vec) -> Result<(Vec, BatchProof), MmrError> { + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, BatchProof), MmrError> { unimplemented!() } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: LeafIndex + _block_numbers: Vec, + _best_known_block_number: u64 ) -> Result<(Vec, BatchProof), MmrError> { unimplemented!() } @@ -349,7 +347,7 @@ fn initialize_beefy( ) -> impl Future where API: ProvideRuntimeApi + Default + Sync + Send, - API::Api: BeefyApi + MmrApi, + API::Api: BeefyApi + MmrApi>, { let tasks = FuturesUnordered::new(); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index a21807c8ee875..4381081f74ebd 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -252,7 +252,7 @@ where C: Client, P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, + R::Api: BeefyApi + MmrApi>, N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index e939ff8ae7cd0..ffc7ac2da56bf 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -30,10 +30,10 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::ProvideRuntimeApi; +use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_mmr_primitives::{BatchProof, Error as MmrError, LeafIndex, Proof}; +use sp_mmr_primitives::{BatchProof, Error as MmrError, Proof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -96,11 +96,11 @@ impl LeafBatchProof { /// MMR RPC methods. #[rpc(client, server)] -pub trait MmrApi { - /// Generate MMR proof for given leaf index. +pub trait MmrApi { + /// Generate MMR proof for given block number. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for leaf at given `leaf_index`. + /// MMR proof for a block with a specified `block_number`. /// Optionally, a block hash at which the runtime should be queried can be specified. /// /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of @@ -108,49 +108,49 @@ pub trait MmrApi { #[method(name = "mmr_generateProof")] fn generate_proof( &self, - leaf_index: LeafIndex, + block_number: BlockNumber, at: Option, ) -> RpcResult>; - /// Generate MMR proof for the given leaf indices. + /// Generate MMR proof for the given block numbers. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for a set of leaves at the given `leaf_indices`. + /// MMR proof for a set of blocks with the specific `block_numbers`. /// Optionally, a block hash at which the runtime should be queried can be specified. /// /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of /// the leaves). Both parameters are SCALE-encoded. 
/// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `leaf_indices` supplied + /// is the same as the order of the entries in `block_numbers` supplied #[method(name = "mmr_generateBatchProof")] fn generate_batch_proof( &self, - leaf_indices: Vec, + block_numbers: Vec, at: Option, ) -> RpcResult>; - /// Generate a MMR proof for the given `leaf_indices` of the MMR that had `leaves_count` leaves. + /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// a MMR proof for the set of leaves at the given `leaf_indices` with MMR fixed to the state - /// with exactly `leaves_count` leaves. `leaves_count` must be larger than all `leaf_indices` - /// for the function to succeed. + /// a MMR proof for the set of blocks that have the given `block_numbers` with MMR given the + /// `best_known_block_number`. `best_known_block_number` must be larger than all the + /// `block_numbers` for the function to succeed. /// /// Optionally, a block hash at which the runtime should be queried can be specified. /// Note that specifying the block hash isn't super-useful here, unless you're generating /// proof using non-finalized blocks where there are several competing forks. That's because - /// MMR state will be fixed to the state with `leaves_count`, which already points to some - /// historical block. + /// MMR state will be fixed to the state with `best_known_block_number`, which already points to + /// some historical block. /// /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of /// the leaves). Both parameters are SCALE-encoded. /// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `leaf_indices` supplied + /// is the same as the order of the entries in `block_numbers` supplied #[method(name = "mmr_generateHistoricalBatchProof")] fn generate_historical_batch_proof( &self, - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec, + best_known_block_number: BlockNumber, at: Option, ) -> RpcResult>; } @@ -169,16 +169,17 @@ impl Mmr { } #[async_trait] -impl MmrApiServer<::Hash> for Mmr +impl MmrApiServer<::Hash, NumberFor> + for Mmr where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: MmrRuntimeApi, + Client::Api: MmrRuntimeApi>, MmrHash: Codec + Send + Sync + 'static, { fn generate_proof( &self, - leaf_index: LeafIndex, + block_number: NumberFor, at: Option<::Hash>, ) -> RpcResult> { let api = self.client.runtime_api(); @@ -188,7 +189,7 @@ where .generate_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_index, + block_number, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; @@ -198,7 +199,7 @@ where fn generate_batch_proof( &self, - leaf_indices: Vec, + block_numbers: Vec>, at: Option<::Hash>, ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); @@ -210,7 +211,7 @@ where .generate_batch_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_indices, + block_numbers, ) .map_err(runtime_error_into_rpc_error)? 
.map_err(mmr_error_into_rpc_error)?; @@ -220,8 +221,8 @@ where fn generate_historical_batch_proof( &self, - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec>, + best_known_block_number: NumberFor, at: Option<::Hash>, ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); @@ -233,8 +234,8 @@ where .generate_historical_batch_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_indices, - leaves_count, + block_numbers, + best_known_block_number, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 8b4f2b60bc198..ad3ce340496e8 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -59,7 +59,7 @@ use codec::Encode; use frame_support::weights::Weight; use sp_runtime::{ - traits::{self, One, Saturating}, + traits::{self, CheckedSub, One, Saturating}, SaturatedConversion, }; @@ -318,37 +318,73 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } - /// Generate a MMR proof for the given `leaf_indices`. + /// Convert a `block_num` into a leaf index. + fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result + where + T: frame_system::Config, + { + // leaf_idx = (leaves_count - 1) - (current_block_num - block_num); + let best_block_num = >::block_number(); + let blocks_diff = best_block_num.checked_sub(&block_num).ok_or_else(|| { + primitives::Error::BlockNumToLeafIndex + .log_debug("The provided block_number is greater than the best block number.") + })?; + let blocks_diff_as_leaf_idx = blocks_diff.try_into().map_err(|_| { + primitives::Error::BlockNumToLeafIndex + .log_debug("The `blocks_diff` couldn't be converted to `LeafIndex`.") + })?; + + let leaf_idx = Self::mmr_leaves() + .checked_sub(1) + .and_then(|last_leaf_idx| last_leaf_idx.checked_sub(blocks_diff_as_leaf_idx)) + .ok_or_else(|| { + primitives::Error::BlockNumToLeafIndex + .log_debug("There aren't enough leaves in the chain.") + })?; + Ok(leaf_idx) + } + + /// Generate a MMR proof for the given `block_numbers`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. pub fn generate_batch_proof( - leaf_indices: Vec, + block_numbers: Vec, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - Self::generate_historical_batch_proof(leaf_indices, Self::mmr_leaves()) + Self::generate_historical_batch_proof( + block_numbers, + >::block_number(), + ) } - /// Generate a MMR proof for the given `leaf_indices` for the MMR of `leaves_count` size. + /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. 
pub fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec, + best_known_block_number: T::BlockNumber, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - if leaves_count > Self::mmr_leaves() { - return Err(Error::InvalidLeavesCount) - } + let leaves_count = + Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + + // we need to translate the block_numbers into leaf indices. + let leaf_indices = block_numbers + .iter() + .map(|block_num| -> Result { + Self::block_num_to_leaf_index(*block_num) + }) + .collect::, _>>()?; let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); mmr.generate_batch_proof(leaf_indices) diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index bcb775ba02819..a63a433029295 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -235,22 +235,21 @@ fn should_generate_proofs_correctly() { // to retrieve full leaf data. register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for all leaves - let proofs = (0_u64..crate::NumberOfLeaves::::get()) + let best_block_number = frame_system::Pallet::::block_number(); + // when generate proofs for all leaves. + let proofs = (1_u64..=best_block_number) .into_iter() - .map(|leaf_index| { - crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() - }) + .map(|block_num| crate::Pallet::::generate_batch_proof(vec![block_num]).unwrap()) .collect::>(); // when generate historical proofs for all leaves - let historical_proofs = (0_u64..crate::NumberOfLeaves::::get()) + let historical_proofs = (1_u64..best_block_number) .into_iter() - .map(|leaf_index| { + .map(|block_num| { let mut proofs = vec![]; - for leaves_count in leaf_index + 1..=num_blocks { + for leaves_count in block_num..=num_blocks { proofs.push( crate::Pallet::::generate_historical_batch_proof( - vec![leaf_index], + vec![block_num], leaves_count, ) .unwrap(), @@ -321,7 +320,7 @@ fn should_generate_proofs_correctly() { leaf_count: 3, items: vec![hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" - ),], + )], } ) ); @@ -352,6 +351,7 @@ fn should_generate_proofs_correctly() { assert_eq!( proofs[4], ( + // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], BatchProof { leaf_indices: vec![4], @@ -393,7 +393,7 @@ fn should_generate_proofs_correctly() { } ) ); - assert_eq!(historical_proofs[6][0], proofs[6]); + assert_eq!(historical_proofs[5][1], proofs[5]); }); } @@ -410,11 +410,12 @@ fn should_generate_batch_proof_correctly() { register_offchain_ext(&mut ext); ext.execute_with(|| { // when generate proofs for a batch of leaves - let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); + let (.., proof) = crate::Pallet::::generate_batch_proof(vec![1, 5, 6]).unwrap(); // then assert_eq!( proof, BatchProof { + // the leaf indices are equivalent to the above specified block numbers - 1. 
leaf_indices: vec![0, 4, 5], leaf_count: 7, items: vec![ @@ -427,7 +428,7 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap(); + crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 6).unwrap(); // then assert_eq!( historical_proof, @@ -443,7 +444,7 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 7).unwrap(); + crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 7).unwrap(); // then assert_eq!(historical_proof, proof); }); @@ -500,15 +501,15 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - leaf_indices: &Vec, + block_numbers: &Vec, blocks_to_add: usize, ) { let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::::generate_batch_proof(leaf_indices.to_vec()).unwrap() + crate::Pallet::::generate_batch_proof(block_numbers.to_vec()).unwrap() }); let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); - let min_mmr_size = leaf_indices.iter().max().unwrap() + 1; + let min_mmr_size = block_numbers.iter().max().unwrap() + 1; // generate historical proofs for all possible mmr sizes, // lower bound being index of highest leaf to be proven @@ -516,7 +517,7 @@ fn should_verify_batch_proofs() { .map(|mmr_size| { ext.execute_with(|| { crate::Pallet::::generate_historical_batch_proof( - leaf_indices.to_vec(), + block_numbers.to_vec(), mmr_size, ) .unwrap() @@ -546,39 +547,41 @@ fn should_verify_batch_proofs() { // to retrieve full leaf data when generating proofs register_offchain_ext(&mut ext); - // verify that up to n=10, valid proofs are generated for all possible leaf combinations - for n in 0..10 { + // verify that up to n=10, valid proofs are generated for all possible block number + // combinations. + for n in 1..=10 { ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n - let leaves_set: Vec> = (0..=n).into_iter().powerset().skip(1).collect(); + // generate powerset (skipping empty set) of all possible block number combinations for mmr + // size n. + let blocks_set: Vec> = (1..=n).into_iter().powerset().skip(1).collect(); - leaves_set.iter().for_each(|leaves_subset| { - generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + blocks_set.iter().for_each(|blocks_subset| { + generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); ext.persist_offchain_overlay(); }); } - // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations - for n in 10..15 { - // (MMR Leafs) + // verify that up to n=15, valid proofs are generated for all possible 2-block number + // combinations. + for n in 11..=15 { ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate all possible 2-leaf combinations for mmr size n - let leaves_set: Vec> = (0..=n).into_iter().combinations(2).collect(); + // generate all possible 2-block number combinations for mmr size n. 
+ let blocks_set: Vec> = (1..=n).into_iter().combinations(2).collect(); - leaves_set.iter().for_each(|leaves_subset| { - generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + blocks_set.iter().for_each(|blocks_subset| { + generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); ext.persist_offchain_overlay(); }); } - generate_and_verify_batch_proof(&mut ext, &vec![7, 11], 20); + generate_and_verify_batch_proof(&mut ext, &vec![8, 12], 20); ext.execute_with(|| add_blocks(1000)); ext.persist_offchain_overlay(); - generate_and_verify_batch_proof(&mut ext, &vec![7, 11, 100, 800], 100); + generate_and_verify_batch_proof(&mut ext, &vec![8, 12, 100, 800], 100); } #[test] @@ -650,11 +653,11 @@ fn should_verify_batch_proof_statelessly() { register_offchain_ext(&mut ext); let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() + crate::Pallet::::generate_batch_proof(vec![1, 4, 5]).unwrap() }); let (historical_leaves, historical_proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap() + crate::Pallet::::generate_historical_batch_proof(vec![1, 4, 5], 6).unwrap() }); // Verify proof without relying on any on-chain data. @@ -920,7 +923,7 @@ fn should_verify_canonicalized() { // Generate proofs for some blocks. let (leaves, proofs) = - ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![0, 4, 5, 7]).unwrap()); + ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![1, 4, 5, 7]).unwrap()); // Verify all previously generated proofs. ext.execute_with(|| { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); @@ -953,19 +956,19 @@ fn does_not_panic_when_generating_historical_proofs() { // when leaf index is invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![10], 7), - Err(Error::LeafNotFound), + Err(Error::BlockNumToLeafIndex), ); // when leaves count is invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![3], 100), - Err(Error::InvalidLeavesCount), + Err(Error::BlockNumToLeafIndex), ); // when both leaf index and leaves count are invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![10], 100), - Err(Error::InvalidLeavesCount), + Err(Error::BlockNumToLeafIndex), ); }); } diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index b479d979f13f3..0edb8babd608e 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -142,7 +142,7 @@ pub use mmr_root_provider::MmrRootProvider; mod mmr_root_provider { use super::*; use crate::{known_payloads, payload::PayloadProvider, Payload}; - use sp_api::ProvideRuntimeApi; + use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_mmr_primitives::MmrApi; use sp_runtime::generic::BlockId; use sp_std::{marker::PhantomData, sync::Arc}; @@ -159,7 +159,7 @@ mod mmr_root_provider { where B: Block, R: ProvideRuntimeApi, - R::Api: MmrApi, + R::Api: MmrApi>, { /// Create new BEEFY Payload provider with MMR Root as payload. 
 		pub fn new(runtime: Arc<R>) -> Self {
@@ -182,7 +182,7 @@ mod mmr_root_provider {
 	where
 		B: Block,
 		R: ProvideRuntimeApi<B>,
-		R::Api: MmrApi<B, MmrRootHash>,
+		R::Api: MmrApi<B, MmrRootHash, NumberFor<B>>,
 	{
 		fn payload(&self, header: &B::Header) -> Option<Payload> {
 			self.mmr_root_from_digest_or_runtime(header).map(|mmr_root| {
diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs
index 7a26cae839ea9..06bc1f4bffe84 100644
--- a/primitives/merkle-mountain-range/src/lib.rs
+++ b/primitives/merkle-mountain-range/src/lib.rs
@@ -387,6 +387,8 @@ impl Proof {
 /// Merkle Mountain Range operation error.
 #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)]
 pub enum Error {
+	/// Error during translation of a block number into a leaf index.
+	BlockNumToLeafIndex,
 	/// Error while pushing new node.
 	Push,
 	/// Error getting the new root.
@@ -403,8 +405,8 @@ pub enum Error {
 	PalletNotIncluded,
 	/// Cannot find the requested leaf index
 	InvalidLeafIndex,
-	/// The provided leaves count is larger than the actual leaves count.
-	InvalidLeavesCount,
+	/// The provided best known block number is invalid.
+	InvalidBestKnownBlock,
 }
 
 impl Error {
@@ -434,9 +436,9 @@ impl Error {
 
 sp_api::decl_runtime_apis! {
 	/// API to interact with MMR pallet.
-	pub trait MmrApi<Hash: codec::Codec> {
-		/// Generate MMR proof for a leaf under given index.
-		fn generate_proof(leaf_index: LeafIndex) -> Result<(EncodableOpaqueLeaf, Proof<Hash>), Error>;
+	pub trait MmrApi<Hash: codec::Codec, BlockNumber: codec::Codec> {
+		/// Generate MMR proof for a block with a specified `block_number`.
+		fn generate_proof(block_number: BlockNumber) -> Result<(EncodableOpaqueLeaf, Proof<Hash>), Error>;
 
 		/// Verify MMR proof against on-chain MMR.
 		///
@@ -457,14 +459,13 @@ sp_api::decl_runtime_apis! {
 		/// Return the on-chain MMR root hash.
 		fn mmr_root() -> Result<Hash, Error>;
 
-		/// Generate MMR proof for a series of leaves under given indices.
-		fn generate_batch_proof(leaf_indices: Vec<LeafIndex>)
-			-> Result<(Vec<EncodableOpaqueLeaf>, BatchProof<Hash>), Error>;
+		/// Generate MMR proof for a series of blocks with the specified block numbers.
+		fn generate_batch_proof(block_numbers: Vec<BlockNumber>) -> Result<(Vec<EncodableOpaqueLeaf>, BatchProof<Hash>), Error>;
 
-		/// Generate MMR proof for a series of leaves under given indices, using MMR at given `leaves_count` size.
+		/// Generate MMR proof for a series of `block_numbers`, given the `best_known_block_number`.
 		fn generate_historical_batch_proof(
-			leaf_indices: Vec<LeafIndex>,
-			leaves_count: LeafIndex
+			block_numbers: Vec<BlockNumber>,
+			best_known_block_number: BlockNumber
 		) -> Result<(Vec<EncodableOpaqueLeaf>, BatchProof<Hash>), Error>;
 
 		/// Verify MMR proof against on-chain MMR for a batch of leaves.
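For reference, the block-number-to-leaf-index translation that the patch above threads through the pallet, runtime API, and RPC reduces to a small piece of arithmetic. Below is a minimal standalone sketch of that logic, assuming plain `u64` values in place of the pallet's generic `T::BlockNumber` and `LeafIndex` types, and assuming (as the pallet does) that one MMR leaf is appended per block:

/// Sketch of the translation performed by `Pallet::block_num_to_leaf_index`:
/// leaf_idx = (leaves_count - 1) - (best_block_num - block_num).
fn block_num_to_leaf_index(
    block_num: u64,
    best_block_num: u64,
    leaves_count: u64,
) -> Result<u64, &'static str> {
    // The requested block must not be ahead of the best known block.
    let blocks_diff = best_block_num
        .checked_sub(block_num)
        .ok_or("the provided block_number is greater than the best block number")?;
    // The MMR must already contain a leaf covering the requested block.
    leaves_count
        .checked_sub(1)
        .and_then(|last_leaf_idx| last_leaf_idx.checked_sub(blocks_diff))
        .ok_or("there aren't enough leaves in the chain")
}

fn main() {
    // With 7 leaves at best block 7, block 5 maps to leaf index 4, matching the
    // "block number - 1" relationship noted in the updated pallet tests.
    assert_eq!(block_num_to_leaf_index(5, 7, 7), Ok(4));
    // Out-of-range inputs yield an error instead of panicking, which is why the
    // updated `does_not_panic_when_generating_historical_proofs` test expects
    // `Error::BlockNumToLeafIndex` in all three failure cases.
    assert!(block_num_to_leaf_index(10, 7, 7).is_err());
}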
From fd00b14932d37bee06371325269c163bf80d1d16 Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 13 Oct 2022 19:31:00 +0900 Subject: [PATCH 72/75] Enable the `wasmtime`-based WASM executor by default (#12486) --- client/cli/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 37a8fd2e0b64d..66742e14789ef 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -51,6 +51,6 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" } tempfile = "3.1.0" [features] -default = ["rocksdb"] +default = ["rocksdb", "wasmtime"] rocksdb = ["sc-client-db/rocksdb"] wasmtime = ["sc-service/wasmtime"] From 6e04e48f36e1839532d566a4dcad3c57b4be939d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:22:57 +0200 Subject: [PATCH 73/75] Trivial BlockId::Number => Hash (#12490) --- primitives/api/test/benches/bench.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 2682c91f94106..2445a5c07f09e 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -27,14 +27,14 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| runtime_api.benchmark_add_one(&block_id, &1)) }); c.bench_function("add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) }); @@ -42,7 +42,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with same runtime api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); let data = vec![0; 1000]; b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) @@ -50,7 +50,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); let data = vec![0; 1000]; b.iter_with_large_drop(|| client.runtime_api().benchmark_vector_add_one(&block_id, &data)) @@ -60,7 +60,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) }); @@ -68,7 +68,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| 
client.runtime_api().benchmark_direct_call(&block_id).unwrap()) }); } From f3139874cb50f9028ecba9bdbd3004e7f3f228f5 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 14 Oct 2022 11:27:32 +0200 Subject: [PATCH 74/75] BlockId removal: refactor: Backend::state_at (#12488) * Minor naming improved * BlockId removal refactor: Backend::state_at * formatting --- bin/node/bench/src/import.rs | 9 +++- client/api/src/backend.rs | 4 +- client/api/src/in_mem.rs | 21 ++++---- .../basic-authorship/src/basic_authorship.rs | 2 +- client/block-builder/src/lib.rs | 5 +- client/db/benches/state_access.rs | 10 ++-- client/db/src/lib.rs | 48 ++++++------------ client/service/src/client/call_executor.rs | 21 ++++---- client/service/src/client/client.rs | 49 ++++++++++++------- client/service/test/src/client/mod.rs | 26 ++++++---- primitives/blockchain/src/backend.rs | 4 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 6 +-- 12 files changed, 105 insertions(+), 100 deletions(-) diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 47f630eb68700..26f9391800ceb 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -34,7 +34,7 @@ use std::borrow::Cow; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; -use sc_client_api::backend::Backend; +use sc_client_api::{backend::Backend, HeaderBackend}; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; @@ -127,10 +127,15 @@ impl core::Benchmark for ImportBenchmark { context.import_block(self.block.clone()); let elapsed = start.elapsed(); + let hash = context + .client + .expect_block_hash_from_id(&BlockId::number(1)) + .expect("Block 1 was imported; qed"); + // Sanity checks. context .client - .state_at(&BlockId::number(1)) + .state_at(&hash) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index bcc7a9bff3b2d..0e94d28b75dd5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -505,11 +505,11 @@ pub trait Backend: AuxStore + Send + Sync { /// Returns true if state for given block is available. fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { - self.state_at(BlockId::Hash(*hash)).is_ok() + self.state_at(hash).is_ok() } /// Returns state backend with post-state of given block. - fn state_at(&self, block: BlockId) -> sp_blockchain::Result; + fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result; /// Attempts to revert the chain by `n` blocks. 
If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 9000f62aa6cc3..9cea1883bdcdc 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -686,7 +686,7 @@ where type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; + let old_state = self.state_at(&Default::default())?; Ok(BlockImportOperation { pending_block: None, old_state, @@ -702,7 +702,8 @@ where operation: &mut Self::BlockImportOperation, block: BlockId, ) -> sp_blockchain::Result<()> { - operation.old_state = self.state_at(block)?; + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + operation.old_state = self.state_at(&hash)?; Ok(()) } @@ -768,16 +769,16 @@ where None } - fn state_at(&self, block: BlockId) -> sp_blockchain::Result { - match block { - BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), - _ => {}, + fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + if *hash == Default::default() { + return Ok(Self::State::default()) } - self.blockchain - .id(block) - .and_then(|id| self.states.read().get(&id).cloned()) - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) + self.states + .read() + .get(hash) + .cloned() + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) } fn revert( diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index f5ccd9023a3db..da98ccab9cb07 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -736,7 +736,7 @@ mod tests { let api = client.runtime_api(); api.execute_block(&block_id, proposal.block).unwrap(); - let state = backend.state_at(block_id).unwrap(); + let state = backend.state_at(&genesis_hash).unwrap(); let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 803e9c1e8bf26..cd5e62e264200 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -258,12 +258,11 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(self.block_id)?; - let parent_hash = self.parent_hash; + let state = self.backend.state_at(&self.parent_hash)?; let storage_changes = self .api - .into_storage_changes(&state, parent_hash) + .into_storage_changes(&state, self.parent_hash) .map_err(sp_blockchain::Error::StorageChanges)?; Ok(BuiltBlock { diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 4f4c10bcc8f53..912a9b028f638 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -84,7 +84,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(BlockId::Hash(parent_hash)).unwrap().storage_root( + let (state_root, tx) = db.state_at(&parent_hash).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); @@ -176,7 +176,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), 
|state| { for key in keys.iter().cycle().take(keys.len() * multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -214,7 +214,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -252,7 +252,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); @@ -290,7 +290,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { let _ = state .storage_hash(sp_core::storage::well_known_keys::CODE) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 32c4c9ef85ed9..ae777ad154f6d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1963,10 +1963,11 @@ impl sc_client_api::backend::Backend for Backend { operation: &mut Self::BlockImportOperation, block: BlockId, ) -> ClientResult<()> { + let hash = self.blockchain.expect_block_hash_from_id(&block)?; if block.is_pre_genesis() { operation.old_state = self.empty_state()?; } else { - operation.old_state = self.state_at(block)?; + operation.old_state = self.state_at(&hash)?; } operation.commit_state = true; @@ -2302,15 +2303,8 @@ impl sc_client_api::backend::Backend for Backend { &self.blockchain } - fn state_at(&self, block: BlockId) -> ClientResult { - use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; - - let is_genesis = match &block { - BlockId::Number(n) if n.is_zero() => true, - BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, - _ => false, - }; - if is_genesis { + fn state_at(&self, hash: &Block::Hash) -> ClientResult { + if hash == &self.blockchain.meta.read().genesis_hash { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root; let db_state = DbStateBuilder::::new(genesis_state.clone(), root) @@ -2322,14 +2316,7 @@ impl sc_client_api::backend::Backend for Backend { } } - let hash = match block { - BlockId::Hash(h) => h, - BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", n)) - })?, - }; - - match self.blockchain.header_metadata(hash) { + match self.blockchain.header_metadata(*hash) { Ok(ref hdr) => { let hint = || { sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) @@ -2337,7 +2324,7 @@ impl sc_client_api::backend::Backend for Backend { .is_some() }; if let Ok(()) = - self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) + self.storage.state_db.pin(hash, hdr.number.saturated_into::(), hint) { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) @@ -2345,12 +2332,12 @@ impl sc_client_api::backend::Backend for Backend { self.shared_trie_cache.as_ref().map(|c| c.local_cache()), ) .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - 
Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) + let state = RefTrackingState::new(db_state, self.storage.clone(), Some(*hash)); + Ok(RecordStatsState::new(state, Some(*hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", - block + hash ))) } }, @@ -2588,7 +2575,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(BlockId::Number(0)).unwrap(); + let state = db.state_at(&hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2623,7 +2610,8 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(BlockId::Number(1)).unwrap(); + let hash = db.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); + let state = db.state_at(&hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -3139,11 +3127,7 @@ pub(crate) mod tests { hash }; - let block0_hash = backend - .state_at(BlockId::Hash(hash0)) - .unwrap() - .storage_hash(&b"test"[..]) - .unwrap(); + let block0_hash = backend.state_at(&hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); let hash1 = { let mut op = backend.begin_operation().unwrap(); @@ -3182,11 +3166,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend - .state_at(BlockId::Hash(hash1)) - .unwrap() - .storage_hash(&b"test"[..]) - .unwrap(); + let block1_hash = backend.state_at(&hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); assert_ne!(block0_hash, block1_hash); } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index e39436ec641d7..8ab332a24be78 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -147,17 +147,14 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - let mut sm = StateMachine::new( &state, &mut changes, @@ -195,14 +192,11 @@ where { let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let changes = &mut *changes.borrow_mut(); - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - // It is important to extract the runtime code here before we create the proof // recorder to not record it. We also need to fetch the runtime code from `state` to // make sure we use the caching layers. 
@@ -255,7 +249,9 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let state = self.backend.state_at(*id)?; + + let at_hash = self.backend.blockchain().expect_block_hash_from_id(id)?; + let state = self.backend.state_at(&at_hash)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -272,7 +268,8 @@ where method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let trie_backend = state.as_trie_backend(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 27561046c3481..e2fd5cda1d2f0 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -414,8 +414,8 @@ where } /// Get a reference to the state at a given block. - pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { - self.backend.state_at(*block) + pub fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + self.backend.state_at(hash) } /// Get the code at a given block. @@ -813,7 +813,7 @@ where Block::new(import_block.header.clone(), body.clone()), )?; - let state = self.backend.state_at(at)?; + let state = self.backend.state_at(parent_hash)?; let gen_storage_changes = runtime_api .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; @@ -1154,7 +1154,9 @@ where id: &BlockId, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id).and_then(|state| prove_read(state, keys).map_err(Into::into)) + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash) + .and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( @@ -1163,7 +1165,8 @@ where child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id) + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash) .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } @@ -1182,7 +1185,8 @@ where start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; // this is a read proof, using version V0 or V1 is equivalent. 
let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; @@ -1204,7 +1208,8 @@ where if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { @@ -1400,7 +1405,8 @@ where id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let keys = self.state_at(&hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } @@ -1409,7 +1415,8 @@ where id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let keys = state .keys(&key_prefix.0) .into_iter() @@ -1427,7 +1434,8 @@ where prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } @@ -1439,7 +1447,8 @@ where prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) } @@ -1449,8 +1458,9 @@ where id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(id)? + .state_at(&hash)? .storage(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1461,7 +1471,8 @@ where id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(id)? + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash)? .storage_hash(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } @@ -1472,8 +1483,9 @@ where child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; let keys = self - .state_at(id)? + .state_at(&hash)? .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1487,8 +1499,9 @@ where child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(id)? + .state_at(&hash)? .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1500,7 +1513,8 @@ where child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(id)? + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash)? 
.child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } @@ -1681,7 +1695,8 @@ where } fn state_at(&self, at: &BlockId) -> Result { - self.state_at(at).map_err(Into::into) + let hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + self.state_at(&hash).map_err(Into::into) } } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index e0f47110d9046..c6ac1fc7d73d9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -20,7 +20,7 @@ use futures::executor::block_on; use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ - in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, + in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, HeaderBackend, StorageProvider, }; use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sc_consensus::{ @@ -338,11 +338,15 @@ fn block_builder_works_with_transactions() { let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); + let hash0 = client + .expect_block_hash_from_id(&BlockId::Number(0)) + .expect("block 0 was just imported. qed"); + let hash1 = client + .expect_block_hash_from_id(&BlockId::Number(1)) + .expect("block 1 was just imported. qed"); + assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); + assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); assert_eq!( client .runtime_api() @@ -392,11 +396,15 @@ fn block_builder_does_not_include_invalid() { let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); + let hash0 = client + .expect_block_hash_from_id(&BlockId::Number(0)) + .expect("block 0 was just imported. qed"); + let hash1 = client + .expect_block_hash_from_id(&BlockId::Number(1)) + .expect("block 1 was just imported. qed"); + assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); + assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index f80c6d0269116..79c05aec8adca 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -78,8 +78,8 @@ pub trait HeaderBackend: Send + Sync { /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is /// not found. 
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
-		self.block_hash_from_id(id).and_then(|n| {
-			n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
+		self.block_hash_from_id(id).and_then(|h| {
+			h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
 		})
 	}
 }
diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs
index f9a57206ece4d..c3d3ec816f97e 100644
--- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs
+++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs
@@ -24,7 +24,7 @@ use jsonrpsee::{
 };
 use sc_rpc_api::DenyUnsafe;
 use serde::{Deserialize, Serialize};
-use sp_runtime::{generic::BlockId, traits::Block as BlockT};
+use sp_runtime::traits::Block as BlockT;
 use std::sync::Arc;
 
 use sp_core::{
@@ -144,8 +144,8 @@ where
 	fn call(&self, at: Option<<B as BlockT>::Hash>) -> RpcResult<MigrationStatusResult> {
 		self.deny_unsafe.check_if_safe()?;
 
-		let block_id = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash));
-		let state = self.backend.state_at(block_id).map_err(error_into_rpc_err)?;
+		let hash = at.unwrap_or_else(|| self.client.info().best_hash);
+		let state = self.backend.state_at(&hash).map_err(error_into_rpc_err)?;
 
 		let (top, child) = migration_status(&state).map_err(error_into_rpc_err)?;
 		Ok(MigrationStatusResult {
From 0ee03277c33b6334ddba7434e014fa637dcb6107 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Fri, 14 Oct 2022 13:26:13 +0200
Subject: [PATCH 75/75] Try-runtime CLI fix weight parsing (#12491)

Signed-off-by: Oliver Tale-Yazdi

Signed-off-by: Oliver Tale-Yazdi
---
 Cargo.lock                                             |  1 +
 utils/frame/try-runtime/cli/Cargo.toml                 |  1 +
 .../try-runtime/cli/src/commands/follow_chain.rs       |  5 +++--
 .../cli/src/commands/on_runtime_upgrade.rs             | 14 ++++++++------
 4 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 062195c70f8ab..032886b9945e2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11356,6 +11356,7 @@ dependencies = [
  "sp-runtime",
  "sp-state-machine",
  "sp-version",
+ "sp-weights",
  "tokio",
  "zstd",
 ]
diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml
index 56ead30644d86..956eaff745335 100644
--- a/utils/frame/try-runtime/cli/Cargo.toml
+++ b/utils/frame/try-runtime/cli/Cargo.toml
@@ -31,6 +31,7 @@
 sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" }
 sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" }
 sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" }
 sp-version = { version = "5.0.0", path = "../../../../primitives/version" }
+sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" }
 frame-try-runtime = { path = "../../../../frame/try-runtime" }
 
 [dev-dependencies]
diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
index 88866b538169b..fb5345827858a 100644
--- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
+++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
@@ -33,6 +33,7 @@ use sc_service::Configuration;
 use serde::de::DeserializeOwned;
 use sp_core::H256;
 use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
+use sp_weights::Weight;
 use std::{collections::VecDeque, fmt::Debug, str::FromStr};
 
 const SUB: &str = "chain_subscribeFinalizedHeads";
@@ -294,8 +295,8 @@ where
 			full_extensions(),
 		)?;
 
-		let consumed_weight = <u64 as Decode>::decode(&mut &*encoded_result)
-			.map_err(|e| format!("failed to decode output: {:?}", e))?;
+		let consumed_weight = <Weight as Decode>::decode(&mut &*encoded_result)
+			.map_err(|e| format!("failed to decode weight: {:?}", e))?;
 
 		let storage_changes = changes
 			.drain_storage_changes(
diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
index 5055e4fb34581..1d7d876a4aa92 100644
--- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
+++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs
@@ -21,6 +21,7 @@ use parity_scale_codec::Decode;
 use sc_executor::NativeExecutionDispatch;
 use sc_service::Configuration;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
+use sp_weights::Weight;
 
 use crate::{
 	build_executor, ensure_matching_spec, extract_code, local_spec, state_machine_call_with_proof,
@@ -78,14 +79,15 @@ where
 		Default::default(), // we don't really need any extensions here.
 	)?;
 
-	let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result)
-		.map_err(|e| format!("failed to decode output: {:?}", e))?;
+	let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result)
+		.map_err(|e| format!("failed to decode weight: {:?}", e))?;
 	log::info!(
 		target: LOG_TARGET,
-		"TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})",
-		weight,
-		total_weight,
-		weight as f64 / total_weight.max(1) as f64
+		"TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = ({} ps, {} byte), total weight = ({} ps, {} byte) ({:.2} %, {:.2} %).",
+		weight.ref_time(), weight.proof_size(),
+		total_weight.ref_time(), total_weight.proof_size(),
+		(weight.ref_time() as f64 / total_weight.ref_time().max(1) as f64) * 100.0,
+		(weight.proof_size() as f64 / total_weight.proof_size().max(1) as f64) * 100.0,
 	);
 	Ok(())
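The underlying cause of the parsing bug fixed above is that `Weight` is no longer a bare `u64` but a struct with two compact-encoded fields, `ref_time` and `proof_size`, so the CLI must decode exactly the type the runtime encodes. Below is a minimal round-trip sketch, assuming the `parity-scale-codec` and `sp-weights` crates and the `Weight::from_parts` constructor (not shown in the patch itself); the numbers are illustrative:

use parity_scale_codec::{Decode, Encode};
use sp_weights::Weight;

fn main() {
    // What the runtime returns after the migration: a (consumed, total) pair of
    // two-dimensional weights.
    let consumed = Weight::from_parts(1_000_000, 2_048);
    let total = Weight::from_parts(4_000_000, 8_192);
    let encoded = (consumed, total).encode();

    // Decoding with the correct type recovers both dimensions, which the new
    // logging code reports as separate ref_time/proof_size percentages.
    let (weight, total_weight) =
        <(Weight, Weight) as Decode>::decode(&mut &*encoded).expect("round-trips");
    let ref_time_pct =
        (weight.ref_time() as f64 / total_weight.ref_time().max(1) as f64) * 100.0;
    let proof_size_pct =
        (weight.proof_size() as f64 / total_weight.proof_size().max(1) as f64) * 100.0;
    assert_eq!((ref_time_pct, proof_size_pct), (25.0, 25.0));

    // Decoding the same bytes as `(u64, u64)`, the pre-fix behaviour, no longer
    // matches the runtime's compact encoding and yields garbage or an error,
    // which is exactly what this patch corrects.
}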