From 125092f355c11166c57deeef0e1841301a351889 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Mon, 4 Oct 2021 16:30:46 +0200
Subject: [PATCH] Introduce block authorship soft deadline (#9663)

* Soft limit.

* Add soft deadline tests.

* cargo +nightly fmt --all

* Fix sc-service test.

* Improving tests
---
 Cargo.lock                                    |   1 +
 .../basic-authorship/src/basic_authorship.rs  | 186 +++++++++++++++++-
 client/service/test/Cargo.toml                |   1 +
 client/service/test/src/client/mod.rs         |  41 ++--
 primitives/keyring/src/sr25519.rs             |  11 ++
 test-utils/runtime/client/src/lib.rs          |  16 +-
 6 files changed, 231 insertions(+), 25 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 5e8932fd5652d..622b52591b90d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8449,6 +8449,7 @@ version = "2.0.0"
 dependencies = [
  "fdlimit",
  "futures 0.3.16",
+ "hex",
  "hex-literal",
  "log 0.4.14",
  "parity-scale-codec",
diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs
index bbee60ae98dcf..0055254b67091 100644
--- a/client/basic-authorship/src/basic_authorship.rs
+++ b/client/basic-authorship/src/basic_authorship.rs
@@ -286,6 +286,11 @@ where
 	}
 }
 
+/// If the block is full we will attempt to push at most
+/// this number of transactions before quitting for real.
+/// It allows us to increase block utilization.
+const MAX_SKIPPED_TRANSACTIONS: usize = 8;
+
 impl<A, B, Block, C, PR> Proposer<B, Block, C, A, PR>
 where
 	A: TransactionPool<Block = Block>,
@@ -309,11 +314,6 @@ where
 		block_size_limit: Option<usize>,
 	) -> Result<Proposal<Block, backend::TransactionFor<B, Block>, PR::Proof>, sp_blockchain::Error>
 	{
-		/// If the block is full we will attempt to push at most
-		/// this number of transactions before quitting for real.
-		/// It allows us to increase block utilization.
-		const MAX_SKIPPED_TRANSACTIONS: usize = 8;
-
 		let mut block_builder =
 			self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?;
 
@@ -336,6 +336,9 @@ where
 		}
 
 		// proceed with transactions
+		// We calculate soft deadline used only in case we start skipping transactions.
+		let now = (self.now)();
+		let soft_deadline = now + deadline.saturating_duration_since(now) / 2;
 		let block_timer = time::Instant::now();
 		let mut skipped = 0;
 		let mut unqueue_invalid = Vec::new();
@@ -364,7 +367,8 @@ where
 		let mut hit_block_size_limit = false;
 
 		while let Some(pending_tx) = pending_iterator.next() {
-			if (self.now)() > deadline {
+			let now = (self.now)();
+			if now > deadline {
 				debug!(
 					"Consensus deadline reached when pushing block transactions, \
 					proceeding with proposing."
@@ -387,6 +391,13 @@ where
 						MAX_SKIPPED_TRANSACTIONS - skipped,
 					);
 					continue
+				} else if now < soft_deadline {
+					debug!(
+						"Transaction would overflow the block size limit, \
+						but we still have time before the soft deadline, so \
+						we will try a bit more."
+					);
+					continue
 				} else {
 					debug!("Reached block size limit, proceeding with proposing.");
 					hit_block_size_limit = true;
@@ -408,6 +419,11 @@ where
 						"Block seems full, but will try {} more transactions before quitting.",
 						MAX_SKIPPED_TRANSACTIONS - skipped,
 					);
+				} else if (self.now)() < soft_deadline {
+					debug!(
+						"Block seems full, but we still have time before the soft deadline, \
+						so we will try a bit more before quitting."
+					);
 				} else {
 					debug!("Block is full, proceed with proposing.");
 					break
@@ -493,6 +509,7 @@ mod tests {
 	use sp_api::Core;
 	use sp_blockchain::HeaderBackend;
 	use sp_consensus::{BlockOrigin, Environment, Proposer};
+	use sp_core::Pair;
 	use sp_runtime::traits::NumberFor;
 	use substrate_test_runtime_client::{
 		prelude::*,
@@ -512,6 +529,19 @@ mod tests {
 			.into_signed_tx()
 	}
 
+	fn exhausts_resources_extrinsic_from(who: usize) -> Extrinsic {
+		let pair = AccountKeyring::numeric(who);
+		let transfer = Transfer {
+			// increase the amount to bump priority
+			amount: 1,
+			nonce: 0,
+			from: pair.public(),
+			to: Default::default(),
+		};
+		let signature = pair.sign(&transfer.encode()).into();
+		Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: true }
+	}
+
 	fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
 	where
 		NumberFor<B>: From<u64>,
@@ -557,7 +587,7 @@ mod tests {
 				return value.1
 			}
 			let old = value.1;
-			let new = old + time::Duration::from_secs(2);
+			let new = old + time::Duration::from_secs(1);
 			*value = (true, new);
 			old
 		}),
@@ -730,8 +760,8 @@ mod tests {
 
 			// then
 			// block should have some extrinsics although we have some more in the pool.
-			assert_eq!(block.extrinsics().len(), expected_block_extrinsics);
 			assert_eq!(txpool.ready().count(), expected_pool_transactions);
+			assert_eq!(block.extrinsics().len(), expected_block_extrinsics);
 
 			block
 		};
@@ -744,6 +774,7 @@ mod tests {
 					.expect("there should be header"),
 			)),
 		);
+		assert_eq!(txpool.ready().count(), 7);
 
 		// let's create one block and import it
 		let block = propose_block(&client, 0, 2, 7);
@@ -757,6 +788,7 @@ mod tests {
 					.expect("there should be header"),
 			)),
 		);
+		assert_eq!(txpool.ready().count(), 5);
 
 		// now let's make sure that we can still make some progress
 		let block = propose_block(&client, 1, 2, 5);
@@ -849,4 +881,142 @@ mod tests {
 		// block size and thus, one less transaction should fit into the limit.
 		assert_eq!(block.extrinsics().len(), extrinsics_num - 2);
 	}
+
+	#[test]
+	fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
+		// given
+		let client = Arc::new(substrate_test_runtime_client::new());
+		let spawner = sp_core::testing::TaskExecutor::new();
+		let txpool = BasicPool::new_full(
+			Default::default(),
+			true.into(),
+			None,
+			spawner.clone(),
+			client.clone(),
+		);
+
+		block_on(
+			txpool.submit_at(
+				&BlockId::number(0),
+				SOURCE,
+				// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
+				(0..MAX_SKIPPED_TRANSACTIONS * 2)
+					.into_iter()
+					.map(|i| exhausts_resources_extrinsic_from(i))
+					// and some transactions that are okay.
+					.chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _)))
+					.collect(),
+			),
+		)
+		.unwrap();
+
+		block_on(
+			txpool.maintain(chain_event(
+				client
+					.header(&BlockId::Number(0u64))
+					.expect("header get error")
+					.expect("there should be header"),
+			)),
+		);
+		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);
+
+		let mut proposer_factory =
+			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
+
+		let cell = Mutex::new(time::Instant::now());
+		let proposer = proposer_factory.init_with_now(
+			&client.header(&BlockId::number(0)).unwrap().unwrap(),
+			Box::new(move || {
+				let mut value = cell.lock();
+				let old = *value;
+				*value = old + time::Duration::from_secs(1);
+				old
+			}),
+		);
+
+		// when
+		// give it enough time so that deadline is never triggered.
+		let deadline = time::Duration::from_secs(900);
+		let block =
+			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
+				.map(|r| r.block)
+				.unwrap();
+
+		// then block should have all non-exhaust resources extrinsics (+ the first one).
+		assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
+	}
+
+	#[test]
+	fn should_only_skip_up_to_some_limit_after_soft_deadline() {
+		// given
+		let client = Arc::new(substrate_test_runtime_client::new());
+		let spawner = sp_core::testing::TaskExecutor::new();
+		let txpool = BasicPool::new_full(
+			Default::default(),
+			true.into(),
+			None,
+			spawner.clone(),
+			client.clone(),
+		);
+
+		block_on(
+			txpool.submit_at(
+				&BlockId::number(0),
+				SOURCE,
+				(0..MAX_SKIPPED_TRANSACTIONS + 2)
+					.into_iter()
+					.map(|i| exhausts_resources_extrinsic_from(i))
+					// and some transactions that are okay.
+					.chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _)))
+					.collect(),
+			),
+		)
+		.unwrap();
+
+		block_on(
+			txpool.maintain(chain_event(
+				client
+					.header(&BlockId::Number(0u64))
+					.expect("header get error")
+					.expect("there should be header"),
+			)),
+		);
+		assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 2);
+
+		let mut proposer_factory =
+			ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);
+
+		let deadline = time::Duration::from_secs(600);
+		let cell = Arc::new(Mutex::new((0, time::Instant::now())));
+		let cell2 = cell.clone();
+		let proposer = proposer_factory.init_with_now(
+			&client.header(&BlockId::number(0)).unwrap().unwrap(),
+			Box::new(move || {
+				let mut value = cell.lock();
+				let (called, old) = *value;
+				// add time after deadline is calculated internally (hence 1)
+				let increase = if called == 1 {
+					// we start after the soft_deadline should have already been reached.
+					deadline / 2
+				} else {
+					// but we make sure to never reach the actual deadline
+					time::Duration::from_millis(0)
+				};
+				*value = (called + 1, old + increase);
+				old
+			}),
+		);
+
+		let block =
+			block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
+				.map(|r| r.block)
+				.unwrap();
+
+		// then the block should have no transactions despite some in the pool
+		assert_eq!(block.extrinsics().len(), 1);
+		assert!(
+			cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
+			"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
+		);
+	}
 }
diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml
index 85a6dcc9e8b29..9e66e9ca381d3 100644
--- a/client/service/test/Cargo.toml
+++ b/client/service/test/Cargo.toml
@@ -12,6 +12,7 @@ repository = "https://github.com/paritytech/substrate/"
 targets = ["x86_64-unknown-linux-gnu"]
 
 [dependencies]
+hex = "0.4"
 hex-literal = "0.3.1"
 tempfile = "3.1.0"
 tokio = { version = "1.10.0", features = ["time"] }
diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs
index d82a839936d7d..8ea605c0ea5be 100644
--- a/client/service/test/src/client/mod.rs
+++ b/client/service/test/src/client/mod.rs
@@ -1759,12 +1759,21 @@ fn storage_keys_iter_works() {
 	let res: Vec<_> = client
 		.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None)
 		.unwrap()
-		.take(2)
-		.map(|x| x.0)
+		.take(8)
+		.map(|x| hex::encode(&x.0))
 		.collect();
 	assert_eq!(
 		res,
-		[hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]
+		[
+			"00c232cf4e70a5e343317016dc805bf80a6a8cd8ad39958d56f99891b07851e0",
+			"085b2407916e53a86efeb8b72dbe338c4b341dab135252f96b6ed8022209b6cb",
+			"0befda6e1ca4ef40219d588a727f1271",
+			"1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34",
+			"1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824",
+			"237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204",
+			"29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729",
+			"3a636f6465",
+		]
 	);
 
 	let res: Vec<_> = client
@@ -1774,15 +1783,19 @@ fn storage_keys_iter_works() {
 			Some(&StorageKey(hex!("3a636f6465").to_vec())),
 		)
 		.unwrap()
-		.take(3)
-		.map(|x| x.0)
+		.take(7)
+		.map(|x| hex::encode(&x.0))
 		.collect();
 	assert_eq!(
 		res,
 		[
-			hex!("3a686561707061676573").to_vec(),
-			hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(),
-			hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(),
+			"3a686561707061676573",
+			"52008686cc27f6e5ed83a216929942f8bcd32a396f09664a5698f81371934b56",
+			"5348d72ac6cc66e5d8cbecc27b0e0677503b845fe2382d819f83001781788fd5",
+			"5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133",
+			"6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081",
+			"66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808",
+			"79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d",
 		]
 	);
 
 	let res: Vec<_> = client
 		.storage_keys_iter(
 			&BlockId::Number(0),
 			Some(&prefix),
 			Some(&StorageKey(hex!(
 				"7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa"
 			)),
 		)
 		.unwrap()
-		.take(1)
-		.map(|x| x.0)
+		.take(5)
+		.map(|x| hex::encode(x.0))
 		.collect();
 	assert_eq!(
 		res,
-		[hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]
+		[
+			"7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa",
+			"811ecfaadcf5f2ee1d67393247e2f71a1662d433e8ce7ff89fb0d4aa9561820b",
+			"a93d74caa7ec34ea1b04ce1e5c090245f867d333f0f88278a451e45299654dc5",
+			"a9ee1403384afbfc13f13be91ff70bfac057436212e53b9733914382ac942892",
+			"cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f",
+		]
 	);
 }
diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs
index 6a7aa3635a43a..604c330b1ea1b 100644
--- a/primitives/keyring/src/sr25519.rs
+++ b/primitives/keyring/src/sr25519.rs
@@ -89,9 +89,20 @@ impl Keyring {
 	pub fn public(self) -> Public {
 		self.pair().public()
 	}
+
 	pub fn to_seed(self) -> String {
 		format!("//{}", self)
 	}
+
+	/// Create a crypto `Pair` from a numeric value.
+	pub fn numeric(idx: usize) -> Pair {
+		Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed")
+	}
+
+	/// Get account id of a `numeric` account.
+	pub fn numeric_id(idx: usize) -> AccountId32 {
+		(*Self::numeric(idx).public().as_array_ref()).into()
+	}
 }
 
 impl From<Keyring> for &'static str {
diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs
index dc5ccadc4574f..bcfe93b6f7975 100644
--- a/test-utils/runtime/client/src/lib.rs
+++ b/test-utils/runtime/client/src/lib.rs
@@ -37,7 +37,7 @@ use sc_client_api::light::{
 use sp_core::{
 	sr25519,
 	storage::{ChildInfo, Storage, StorageChild},
-	ChangesTrieConfiguration,
+	ChangesTrieConfiguration, Pair,
 };
 use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor};
 use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig};
@@ -118,11 +118,15 @@ impl GenesisParameters {
 				sr25519::Public::from(Sr25519Keyring::Bob).into(),
 				sr25519::Public::from(Sr25519Keyring::Charlie).into(),
 			],
-			vec![
-				AccountKeyring::Alice.into(),
-				AccountKeyring::Bob.into(),
-				AccountKeyring::Charlie.into(),
-			],
+			(0..16_usize)
+				.into_iter()
+				.map(|i| AccountKeyring::numeric(i).public())
+				.chain(vec![
+					AccountKeyring::Alice.into(),
+					AccountKeyring::Bob.into(),
+					AccountKeyring::Charlie.into(),
+				])
+				.collect(),
 			1000,
 			self.heap_pages_override,
 			self.extra_storage.clone(),