From 8c95499655219e6e22d6e06fed388ebdf19a8fcb Mon Sep 17 00:00:00 2001 From: Bernhard Schuster Date: Mon, 6 Dec 2021 15:20:29 +0100 Subject: [PATCH] dev-comment spelling mistakes (#4434) --- node/collation-generation/src/tests.rs | 4 ++-- node/core/backing/src/tests.rs | 4 ++-- node/core/pvf/src/executor_intf.rs | 6 +++--- node/core/pvf/src/host.rs | 8 ++++---- node/core/pvf/src/prepare/queue.rs | 6 +++--- node/core/pvf/src/worker_common.rs | 8 ++++---- .../network/collator-protocol/src/collator_side/mod.rs | 2 +- .../collator-protocol/src/collator_side/tests.rs | 6 +++--- .../collator-protocol/src/validator_side/mod.rs | 8 ++++---- .../collator-protocol/src/validator_side/tests.rs | 4 ++-- node/network/dispute-distribution/src/sender/mod.rs | 2 +- node/network/dispute-distribution/src/tests/mod.rs | 4 ++-- node/test/polkadot-simnet/common/src/lib.rs | 4 ++-- primitives/src/v1/signed.rs | 4 ++-- runtime/parachains/src/builder.rs | 8 ++++---- runtime/parachains/src/disputes.rs | 10 +++++----- runtime/parachains/src/hrmp.rs | 4 ++-- runtime/parachains/src/initializer.rs | 4 ++-- runtime/parachains/src/paras.rs | 2 +- runtime/parachains/src/paras/benchmarking.rs | 2 +- runtime/parachains/src/paras_inherent/mod.rs | 4 ++-- runtime/parachains/src/paras_inherent/tests.rs | 2 +- runtime/parachains/src/scheduler.rs | 4 ++-- runtime/parachains/src/session_info.rs | 2 +- runtime/parachains/src/ump.rs | 4 ++-- xcm/xcm-simulator/fuzzer/src/fuzz.rs | 2 +- 26 files changed, 59 insertions(+), 59 deletions(-) diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index 2e7417a79ec3..5b7667eb94bb 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -206,7 +206,7 @@ mod handle_new_activations { // the only activated hash should be from the 4 hash: // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 - // given that the test configuration has a para_id of 16, there's only one way to get that value: with the 4 + // given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4 // hash. assert_eq!(requested_validation_data, vec![[4; 32].into()]); } @@ -303,7 +303,7 @@ mod handle_new_activations { .into_inner(); // we expect a single message to be sent, containing a candidate receipt. - // we don't care too much about the commitments_hash right now, but let's ensure that we've calculated the + // we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the // correct descriptor let expect_pov_hash = test_collation_compressed().proof_of_validity.hash(); let expect_validation_data_hash = test_validation_data().hash(); diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index a262dd45d470..6c66af084758 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -196,7 +196,7 @@ impl TestCandidateBuilder { } } -// Tests that the subsystem performs actions that are requied on startup. +// Tests that the subsystem performs actions that are required on startup. async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) { // Start work on some new parent. virtual_overseer @@ -378,7 +378,7 @@ fn backing_second_works() { }); } -// Test that the candidate reaches quorum succesfully. +// Test that the candidate reaches quorum successfully. 
#[test] fn backing_works() { let test_state = TestState::default(); diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index 52c869c47737..a7a75df38a86 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -34,7 +34,7 @@ const CONFIG: Config = Config { // Besides `heap_pages` linear memory requests an initial number of pages. Those pages are // typically used for placing the so-called shadow stack and the data section. // - // By default, rustc (or lld specifically) allocates 1 MiB for the shadow stack. That is, 16 + // By default, rustc (or `lld` specifically) allocates 1 MiB for the shadow stack. That is, 16 // wasm pages. // // Data section for runtimes are typically rather small and can fit in a single digit number of @@ -51,7 +51,7 @@ const CONFIG: Config = Config { cache_path: None, semantics: Semantics { fast_instance_reuse: false, - // Enable determinstic stack limit to pin down the exact number of items the wasmtime stack + // Enable deterministic stack limit to pin down the exact number of items the wasmtime stack // can contain before it traps with stack overflow. // // Here is how the values below were chosen. @@ -60,7 +60,7 @@ const CONFIG: Config = Config { // (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can // fit 2x 65536 logical items. // - // Since reaching the native stack limit is undesirable, we halven the logical item limit and + // Since reaching the native stack limit is undesirable, we halve the logical item limit and // also increase the native 256x. This hopefully should preclude wasm code from reaching // the stack limit set by the wasmtime. deterministic_stack_limit: Some(DeterministicStackLimit { diff --git a/node/core/pvf/src/host.rs b/node/core/pvf/src/host.rs index 809d07164ba5..ae0f3d103b6a 100644 --- a/node/core/pvf/src/host.rs +++ b/node/core/pvf/src/host.rs @@ -145,7 +145,7 @@ pub struct Config { impl Config { /// Create a new instance of the configuration. pub fn new(cache_path: std::path::PathBuf, program_path: std::path::PathBuf) -> Self { - // Do not contaminate the other parts of the codebase with the types from async_std. + // Do not contaminate the other parts of the codebase with the types from `async_std`. let cache_path = PathBuf::from(cache_path); let program_path = PathBuf::from(program_path); @@ -378,7 +378,7 @@ async fn run( // can be scheduled as a result of this function call, in case there are pending // executions. // - // We could be eager in terms of reporting and plumb the result from the prepartion + // We could be eager in terms of reporting and plumb the result from the preparation // worker but we don't for the sake of simplicity. break_if_fatal!(handle_prepare_done( &cache_path, @@ -1087,7 +1087,7 @@ mod tests { // Received the precheck result. assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(())); - // Send multiple requests for the same pvf. + // Send multiple requests for the same PVF. let mut precheck_receivers = Vec::new(); for _ in 0..3 { let (result_tx, result_rx) = oneshot::channel(); @@ -1121,7 +1121,7 @@ mod tests { let mut host = test.host_handle(); // Test mixed cases of receiving execute and precheck requests - // for the same pvf. + // for the same PVF. // Send PVF for the execution and request the prechecking for it. 
let (result_tx, result_rx_execute) = oneshot::channel(); diff --git a/node/core/pvf/src/prepare/queue.rs b/node/core/pvf/src/prepare/queue.rs index 0dfcb4e1a099..f1af292c7538 100644 --- a/node/core/pvf/src/prepare/queue.rs +++ b/node/core/pvf/src/prepare/queue.rs @@ -251,7 +251,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu if let Some(available) = find_idle_worker(queue) { // This may seem not fair (w.r.t priority) on the first glance, but it should be. This is - // because as soon as a worker finishes with the job it's immediatelly given the next one. + // because as soon as a worker finishes with the job it's immediately given the next one. assign(queue, available, job).await?; } else { spawn_extra_worker(queue, priority.is_critical()).await?; @@ -335,7 +335,7 @@ async fn handle_worker_concluded( match $expr { Some(v) => v, None => { - // Precondition of calling this is that the $expr is never none; + // Precondition of calling this is that the `$expr` is never none; // Assume the conditions holds, then this never is not hit; // qed. never!("never_none, {}", stringify!($expr)); @@ -794,7 +794,7 @@ mod tests { let w1 = test.workers.insert(()); test.send_from_pool(pool::FromPool::Spawned(w1)); - // Now, to the interesting part. After the queue normally issues the start_work command to + // Now, to the interesting part. After the queue normally issues the `start_work` command to // the pool, before receiving the command the queue may report that the worker ripped. assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); test.send_from_pool(pool::FromPool::Rip(w1)); diff --git a/node/core/pvf/src/worker_common.rs b/node/core/pvf/src/worker_common.rs index 9c68eaeeb0fb..534bcb617989 100644 --- a/node/core/pvf/src/worker_common.rs +++ b/node/core/pvf/src/worker_common.rs @@ -236,7 +236,7 @@ impl WorkerHandle { // We don't expect the bytes to be ever read. But in case we do, we should not use a buffer // of a small size, because otherwise if the child process does return any data we will end up // issuing a syscall for each byte. We also prefer not to do allocate that on the stack, since - // each poll the buffer will be allocated and initialized (and that's due poll_read takes &mut [u8] + // each poll the buffer will be allocated and initialized (and that's due `poll_read` takes &mut [u8] // and there are no guarantees that a `poll_read` won't ever read from there even though that's // unlikely). // @@ -259,7 +259,7 @@ impl futures::Future for WorkerHandle { let me = self.project(); match futures::ready!(AsyncRead::poll_read(me.stdout, cx, &mut *me.drop_box)) { Ok(0) => { - // 0 means EOF means the child was terminated. Resolve. + // 0 means `EOF` means the child was terminated. Resolve. Poll::Ready(()) }, Ok(_bytes_read) => { @@ -268,7 +268,7 @@ impl futures::Future for WorkerHandle { Poll::Pending }, Err(_) => { - // The implementation is guaranteed to not to return WouldBlock and Interrupted. This + // The implementation is guaranteed to not to return `WouldBlock` and Interrupted. This // leaves us with a legit errors which we suppose were due to termination. Poll::Ready(()) }, @@ -284,7 +284,7 @@ impl fmt::Debug for WorkerHandle { /// Convert the given path into a byte buffer. pub fn path_to_bytes(path: &Path) -> &[u8] { - // Ideally, we take the OsStr of the path, send that and reconstruct this on the other side. + // Ideally, we take the `OsStr` of the path, send that and reconstruct this on the other side. 
// However, libstd doesn't provide us with such an option. There are crates out there that // allow for extraction of a path, but TBH it doesn't seem to be a real issue. // diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 9bfe3a56ed9c..5a957735da31 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -938,7 +938,7 @@ where handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?; }, NewGossipTopology(..) => { - // impossibru! + // impossible! }, } diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests.rs index 343243cc7140..44a3d1ee2c1a 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests.rs @@ -311,7 +311,7 @@ async fn distribute_collation( // whether or not we expect a connection request or not. should_connect: bool, ) -> DistributeCollation { - // Now we want to distribute a PoVBlock + // Now we want to distribute a `PoVBlock` let pov_block = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pov_hash = pov_block.hash(); @@ -531,7 +531,7 @@ fn advertise_and_send_collation() { // We declare to the connected validators that we are a collator. // We need to catch all `Declare` messages to the validators we've - // previosly connected to. + // previously connected to. for peer_id in test_state.current_group_validator_peer_ids() { expect_declare_msg(&mut virtual_overseer, &test_state, &peer_id).await; } @@ -897,7 +897,7 @@ where // We declare to the connected validators that we are a collator. // We need to catch all `Declare` messages to the validators we've - // previosly connected to. + // previously connected to. for peer_id in test_state.current_group_validator_peer_ids() { expect_declare_msg(virtual_overseer, &test_state, &peer_id).await; } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index df584ad45096..8d76488cfc62 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1009,7 +1009,7 @@ where state.metrics.note_collator_peer_count(state.peer_data.len()); }, NewGossipTopology(..) => { - // impossibru! + // impossible! }, PeerViewChange(peer_id, view) => { handle_peer_view_change(state, peer_id, view).await?; @@ -1254,7 +1254,7 @@ async fn handle_collation_fetched_result<Context>( Context: SubsystemContext<Message = CollatorProtocolMessage>, { // If no prior collation for this relay parent has been seconded, then - // memoize the collation_event for that relay_parent, such that we may + // memorize the `collation_event` for that `relay_parent`, such that we may // notify the collator of their successful second backing let relay_parent = collation_event.1.relay_parent; @@ -1392,7 +1392,7 @@ async fn poll_collation_response( "Fetching collation failed due to network error" ); // A minor decrease in reputation for any network failure seems - // sensible. In theory this could be exploited, by DoSing this node, + // sensible. In theory this could be exploited, by `DoS`-ing this node, // which would result in reduced reputation for proper nodes, but the // same can happen for penalties on timeouts, which we also have.
CollationFetchResult::Error(COST_NETWORK_ERROR) @@ -1406,7 +1406,7 @@ async fn poll_collation_response( "Request timed out" ); // A minor decrease in reputation for any network failure seems - // sensible. In theory this could be exploited, by DoSing this node, + // sensible. In theory this could be exploited, by `DoS`-ing this node, // which would result in reduced reputation for proper nodes, but the // same can happen for penalties on timeouts, which we also have. CollationFetchResult::Error(COST_REQUEST_TIMED_OUT) diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index 4e92f2eff0cf..6815e1966f05 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -494,7 +494,7 @@ fn collator_authentication_verification_works() { // our view. // - Collation protocol should request one PoV. // - Collation protocol should disconnect both collators after having received the collation. -// - The same collators plus an additional collator connect again and send povs for a different relay parent. +// - The same collators plus an additional collator connect again and send `PoV`s for a different relay parent. // - Collation protocol will request one PoV, but we will cancel it. // - Collation protocol should request the second PoV which does not succeed in time. // - Collation protocol should request third PoV. @@ -697,7 +697,7 @@ fn reject_connection_to_next_group() { &mut virtual_overseer, peer_b.clone(), test_state.collators[0].clone(), - test_state.chain_ids[1].clone(), // next, not current para_id + test_state.chain_ids[1].clone(), // next, not current `para_id` ) .await; diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs index c047c4b72665..55d88f1310ae 100644 --- a/node/network/dispute-distribution/src/sender/mod.rs +++ b/node/network/dispute-distribution/src/sender/mod.rs @@ -269,7 +269,7 @@ impl DisputeSender { // but I don't want to enable a bypass for the below smart constructor and this code path // is supposed to be only hit on startup basically. // - // Revisit this decision when the `from_signed_statements` is unneded for the normal code + // Revisit this decision when the `from_signed_statements` is unneeded for the normal code // path as well. let message = DisputeMessage::from_signed_statements( valid_signed, diff --git a/node/network/dispute-distribution/src/tests/mod.rs b/node/network/dispute-distribution/src/tests/mod.rs index 9bec01290682..07239fc55214 100644 --- a/node/network/dispute-distribution/src/tests/mod.rs +++ b/node/network/dispute-distribution/src/tests/mod.rs @@ -363,7 +363,7 @@ fn send_dispute_gets_cleaned_up() { ) .await; - // Yield, so subsystem can make progess: + // Yield, so subsystem can make progress: Delay::new(Duration::from_millis(2)).await; conclude(&mut handle).await; @@ -582,7 +582,7 @@ async fn conclude(handle: &mut TestSubsystemContextHandle<DisputeDistributionMessage>) diff --git a/node/test/polkadot-simnet/common/src/lib.rs b/node/test/polkadot-simnet/common/src/lib.rs --- a/node/test/polkadot-simnet/common/src/lib.rs +++ b/node/test/polkadot-simnet/common/src/lib.rs let cmd = <polkadot_cli::Cli as StructOpt>::from_args(); // set up logging let filters = cmd.run.base.log_filters()?; diff --git a/primitives/src/v1/signed.rs b/primitives/src/v1/signed.rs index cc84448f88f8..6bf8c6c0480e 100644 --- a/primitives/src/v1/signed.rs +++ b/primitives/src/v1/signed.rs @@ -150,7 +150,7 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPayload> -// We can't bound this on `Payload: Into<RealPayload>` beacuse that conversion consumes +// We can't bound this on `Payload: Into<RealPayload>` because that conversion consumes // the payload, and we don't want that.
We can't bound it on `Payload: AsRef<RealPayload>` // because there's no blanket impl of `AsRef<T> for T`. In the end, we just invent our // own trait which does what we need: EncodeAs. @@ -214,7 +214,7 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> UncheckedSigned<Payload, RealPayload> fn payload_data<H: Encode>(payload: &Payload, context: &SigningContext<H>) -> Vec<u8> { - // equivalent to (real_payload, context).encode() + // equivalent to (`real_payload`, context).encode() let mut out = payload.encode_as(); out.extend(context.encode()); out diff --git a/runtime/parachains/src/builder.rs b/runtime/parachains/src/builder.rs index 09a39500eff7..2c6eae537497 100644 --- a/runtime/parachains/src/builder.rs +++ b/runtime/parachains/src/builder.rs @@ -149,10 +149,10 @@ impl<T: paras_inherent::Config> BenchBuilder<T> { /// Mock header. pub(crate) fn header(block_number: T::BlockNumber) -> T::Header { T::Header::new( - block_number, // block_number, - Default::default(), // extrinsics_root, - Default::default(), // storage_root, - Default::default(), // parent_hash, + block_number, // `block_number`, + Default::default(), // `extrinsics_root`, + Default::default(), // `storage_root`, + Default::default(), // `parent_hash`, Default::default(), // digest, ) } diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 47b9061e3653..030ce633437a 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -602,7 +602,7 @@ impl StatementSetFilter { // reverse order ensures correctness for index in indices.into_iter().rev() { - // swap_remove guarantees linear complexity. + // `swap_remove` guarantees linear complexity. statement_set.statements.swap_remove(index); } @@ -1579,7 +1579,7 @@ mod tests { }); } - // Test prunning works + // Test pruning works #[test] fn test_initializer_on_new_session() { let dispute_period = 3; @@ -2244,7 +2244,7 @@ mod tests { Pallet::<Test>::note_included(4, candidate_hash.clone(), 4); assert_eq!(SpamSlots::<Test>::get(4), Some(vec![0, 0, 0, 0, 0, 0, 0])); - // Ensure the reward_validator function was correctly called + // Ensure the `reward_validator` function was correctly called assert_eq!( REWARD_VALIDATORS.with(|r| r.borrow().clone()), vec![ @@ -2304,7 +2304,7 @@ mod tests { assert_noop!( { Pallet::<Test>::revert_and_freeze(0); - Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop. + Result::<(), ()>::Err(()) // Just a small trick in order to use `assert_noop`. }, (), ); @@ -2325,7 +2325,7 @@ mod tests { assert_noop!( { Pallet::<Test>::revert_and_freeze(10); - Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop. + Result::<(), ()>::Err(()) // Just a small trick in order to use `assert_noop`. }, (), ); diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 87ba4ad861b8..82e8f90a26ba 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -1964,7 +1964,7 @@ mod tests { assert_eq!(<Test as Config>::Currency::free_balance(&para_a.into_account()), 80); // Then deregister one parachain, but don't wait two sessions until it takes effect. - // Instead, para_b will confirm the request, which will take place the same time + // Instead, `para_b` will confirm the request, which will take place the same time // the offboarding should happen. deregister_parachain(para_a); run_to_block(9, Some(vec![9])); @@ -1973,7 +1973,7 @@ mod tests { assert!(!channel_exists(para_a, para_b)); run_to_block(10, Some(vec![10])); - // The outcome we expect is para_b should receive the refund. + // The outcome we expect is `para_b` should receive the refund.
assert_eq!(<Test as Config>::Currency::free_balance(&para_b.into_account()), 110); assert!(!channel_exists(para_a, para_b)); assert_storage_consistency_exhaustive(); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 0467e10f1a4b..d8be40eb56db 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -147,7 +147,7 @@ pub mod pallet { // - Paras // - Scheduler // - Inclusion - // - SessionInfo + // - `SessionInfo` // - Disputes // - DMP // - UMP @@ -292,7 +292,7 @@ impl<T: Config> Pallet<T> { } } - // Allow to trigger on_new_session in tests, this is needed as long as pallet_session is not + // Allow to trigger `on_new_session` in tests, this is needed as long as `pallet_session` is not // implemented in mock. #[cfg(any(test, feature = "runtime-benchmarks"))] pub(crate) fn test_trigger_on_new_session<'a, I: 'a>( diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index 8e8b2a6d6c96..ececbf7a15d0 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -249,7 +249,7 @@ impl<N: Ord + Copy + PartialEq> ParaPastCodeMeta<N> { // no-op prune. self.upgrade_times.drain(self.upgrade_times.len()..) } else { - // if we are actually pruning something, update the last_pruned member. + // if we are actually pruning something, update the `last_pruned` member. self.last_pruned = Some(self.upgrade_times[to_prune - 1].activated_at); self.upgrade_times.drain(..to_prune) }; diff --git a/runtime/parachains/src/paras/benchmarking.rs b/runtime/parachains/src/paras/benchmarking.rs index 9c3de5cf9b2c..d3b7b9484f92 100644 --- a/runtime/parachains/src/paras/benchmarking.rs +++ b/runtime/parachains/src/paras/benchmarking.rs @@ -108,7 +108,7 @@ benchmarks! { let s in 1 .. MAX_HEAD_DATA_SIZE; let para_id = ParaId::from(1000); let new_head = HeadData(vec![0; s as usize]); - // schedule an expired code upgrade for this para_id so that force_note_new_head would use + // schedule an expired code upgrade for this `para_id` so that force_note_new_head would use // the worst possible code path let expired = frame_system::Pallet::<T>::block_number().saturating_sub(One::one()); let config = HostConfiguration::<T::BlockNumber>::default(); diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 761ac6a317b0..86fd19772efd 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -142,7 +142,7 @@ pub mod pallet { #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { fn on_initialize(_: T::BlockNumber) -> Weight { - T::DbWeight::get().reads_writes(1, 1) // in on_finalize. + T::DbWeight::get().reads_writes(1, 1) // in `on_finalize`. } fn on_finalize(_: T::BlockNumber) { @@ -947,7 +947,7 @@ fn compute_entropy<T: Config>(parent_hash: T::Hash) -> [u8; 32] { if let Some(vrf_random) = vrf_random { entropy.as_mut().copy_from_slice(vrf_random.as_ref()); } else { - // in case there is no vrf randomness present, we utilize the relay parent + // in case there is no VRF randomness present, we utilize the relay parent // as seed, it's better than a static value.
log::warn!(target: LOG_TARGET, "CurrentBlockRandomness did not provide entropy"); entropy.as_mut().copy_from_slice(parent_hash.as_ref()); diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index e5b58b8287d5..713a313dd9c6 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -203,7 +203,7 @@ mod enter { new_test_ext(MockGenesisConfig::default()).execute_with(|| { // Create the inherent data for this block let dispute_statements = BTreeMap::new(); - // No backed and concluding cores, so all cores will be filld with disputes. + // No backed and concluding cores, so all cores will be filled with disputes. let backed_and_concluding = BTreeMap::new(); let scenario = make_inherent_data(TestConfig { diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 706fd844b316..519646dab320 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -533,7 +533,7 @@ impl<T: Config> Pallet<T> { let mut availability_cores = AvailabilityCores::<T>::get(); Scheduled::<T>::mutate(|scheduled| { - // The constraints on the function require that now_occupied is a sorted subset of the + // The constraints on the function require that `now_occupied` is a sorted subset of the // `scheduled` cores, which are also sorted. let mut occupied_iter = now_occupied.iter().cloned().peekable(); @@ -983,7 +983,7 @@ mod tests { schedule_blank_para(thread_c, false); } - // set up a queue as if n_cores was 4 and with some with many retries. + // set up a queue as if `n_cores` was 4 and with some with many retries. ParathreadQueue::<Test>::put({ let mut queue = ParathreadClaimQueue::default(); diff --git a/runtime/parachains/src/session_info.rs b/runtime/parachains/src/session_info.rs index d69f71e827b9..9c53357cb6c4 100644 --- a/runtime/parachains/src/session_info.rs +++ b/runtime/parachains/src/session_info.rs @@ -264,7 +264,7 @@ mod tests { assert!(Sessions::<Test>::get(8).is_some()); assert!(Sessions::<Test>::get(9).is_some()); - // changing dispute_period works + // changing `dispute_period` works let dispute_period = 5; Configuration::set_dispute_period(Origin::root(), dispute_period).unwrap(); diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index 47111e357db9..9bcb393f9de8 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -774,7 +774,7 @@ pub(crate) mod tests { assert!(!queue.is_empty()); } - // actually count the counts and sizes in queues and compare them to the bookkeeped version. + // actually count the counts and sizes in queues and compare them to the bookkept version. for (para, queue) in <Ump as Store>::RelayDispatchQueues::iter() { let (expected_count, expected_size) = <Ump as Store>::RelayDispatchQueueSize::get(para); let (actual_count, actual_size) = @@ -934,7 +934,7 @@ pub(crate) mod tests { ) .execute_with(|| { // We want to test here an edge case, where we remove the queue with the highest - // para id (i.e. last in the needs_dispatch order). + // para id (i.e. last in the `needs_dispatch` order). // // If the last entry was removed we should proceed execution, assuming we still have // weight available.
diff --git a/xcm/xcm-simulator/fuzzer/src/fuzz.rs b/xcm/xcm-simulator/fuzzer/src/fuzz.rs index 44516ab8a562..643fca703782 100644 --- a/xcm/xcm-simulator/fuzzer/src/fuzz.rs +++ b/xcm/xcm-simulator/fuzzer/src/fuzz.rs @@ -129,7 +129,7 @@ fn main() { } #[cfg(not(fuzzing))] { - //This code path can be used to generate a line-code coverage report in html + //This code path can be used to generate a line-code coverage report in HTML //that depicts which lines are executed by at least one input in the current fuzzing queue. //To generate this code coverage report, run the following commands: /*