dev-comment spelling mistakes
drahnr committed Dec 6, 2021
1 parent c2bab49 commit 34f67e4
Showing 26 changed files with 59 additions and 59 deletions.
4 changes: 2 additions & 2 deletions node/collation-generation/src/tests.rs
@@ -206,7 +206,7 @@ mod handle_new_activations {

// the only activated hash should be from the 4 hash:
// each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5
-// given that the test configuration has a para_id of 16, there's only one way to get that value: with the 4
+// given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4
// hash.
assert_eq!(requested_validation_data, vec![[4; 32].into()]);
}
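
As a quick aside on the arithmetic in the comment above, the following is a minimal, self-contained sketch of the selection it describes. The set of activated hash values is an assumption made up for illustration; only the `* 4` / `* 5` rule and the `para_id` of 16 come from the comment itself.

```rust
fn main() {
    // Assumed activated hash values, purely for illustration.
    let activated = [1u32, 4, 9, 16];
    let para_id = 16u32;

    // Each activated hash produces two scheduled cores: value * 4 and value * 5.
    let matching: Vec<u32> = activated
        .iter()
        .copied()
        .filter(|&value| value * 4 == para_id || value * 5 == para_id)
        .collect();

    // Only the `4` hash can yield the test's `para_id` of 16.
    assert_eq!(matching, vec![4]);
}
```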
@@ -303,7 +303,7 @@ mod handle_new_activations {
.into_inner();

// we expect a single message to be sent, containing a candidate receipt.
-// we don't care too much about the commitments_hash right now, but let's ensure that we've calculated the
+// we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the
// correct descriptor
let expect_pov_hash = test_collation_compressed().proof_of_validity.hash();
let expect_validation_data_hash = test_validation_data().hash();
4 changes: 2 additions & 2 deletions node/core/backing/src/tests.rs
@@ -196,7 +196,7 @@ impl TestCandidateBuilder {
}
}

-// Tests that the subsystem performs actions that are requied on startup.
+// Tests that the subsystem performs actions that are required on startup.
async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) {
// Start work on some new parent.
virtual_overseer
@@ -378,7 +378,7 @@ fn backing_second_works() {
});
}

-// Test that the candidate reaches quorum succesfully.
+// Test that the candidate reaches quorum successfully.
#[test]
fn backing_works() {
let test_state = TestState::default();
6 changes: 3 additions & 3 deletions node/core/pvf/src/executor_intf.rs
@@ -34,7 +34,7 @@ const CONFIG: Config = Config {
// Besides `heap_pages` linear memory requests an initial number of pages. Those pages are
// typically used for placing the so-called shadow stack and the data section.
//
-// By default, rustc (or lld specifically) allocates 1 MiB for the shadow stack. That is, 16
+// By default, rustc (or `lld` specifically) allocates 1 MiB for the shadow stack. That is, 16
// wasm pages.
//
// Data section for runtimes are typically rather small and can fit in a single digit number of
@@ -51,7 +51,7 @@ const CONFIG: Config = Config {
cache_path: None,
semantics: Semantics {
fast_instance_reuse: false,
-// Enable determinstic stack limit to pin down the exact number of items the wasmtime stack
+// Enable deterministic stack limit to pin down the exact number of items the wasmtime stack
// can contain before it traps with stack overflow.
//
// Here is how the values below were chosen.
@@ -60,7 +60,7 @@ const CONFIG: Config = Config {
// (see the docs about the field and the instrumentation algorithm) is 8 bytes, 1 MiB can
// fit 2x 65536 logical items.
//
-// Since reaching the native stack limit is undesirable, we halven the logical item limit and
+// Since reaching the native stack limit is undesirable, we halve the logical item limit and
// also increase the native 256x. This hopefully should preclude wasm code from reaching
// the stack limit set by the wasmtime.
deterministic_stack_limit: Some(DeterministicStackLimit {
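
The comments above compress a fair amount of arithmetic: 64 KiB wasm pages, 8 bytes per logical stack item, then a halving of the logical limit and a 256x increase of the native stack. Below is a small, self-contained sketch of those numbers; the constants are illustrative assumptions that follow the reasoning in the comments and are not read out of the real `Config`.

```rust
fn main() {
    // A wasm page is 64 KiB, so the 1 MiB shadow stack reserved by rustc/lld
    // corresponds to 16 wasm pages.
    const WASM_PAGE_SIZE: u32 = 64 * 1024;
    assert_eq!((1024 * 1024) / WASM_PAGE_SIZE, 16);

    // With 8 bytes per logical stack item, 1 MiB of native stack fits
    // 2 * 65536 logical items.
    const BYTES_PER_LOGICAL_ITEM: u32 = 8;
    let items_per_mib = (1024 * 1024) / BYTES_PER_LOGICAL_ITEM;
    assert_eq!(items_per_mib, 2 * 65536);

    // Halve the logical limit and grow the native stack 256x, as described
    // above (illustrative values, not the exact production configuration).
    let logical_max = items_per_mib / 2;
    let native_stack_max: u32 = 256 * 1024 * 1024;
    assert_eq!(logical_max, 65536);
    println!("logical_max = {logical_max}, native_stack_max = {native_stack_max} bytes");
}
```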
8 changes: 4 additions & 4 deletions node/core/pvf/src/host.rs
@@ -145,7 +145,7 @@ pub struct Config {
impl Config {
/// Create a new instance of the configuration.
pub fn new(cache_path: std::path::PathBuf, program_path: std::path::PathBuf) -> Self {
-// Do not contaminate the other parts of the codebase with the types from async_std.
+// Do not contaminate the other parts of the codebase with the types from `async_std`.
let cache_path = PathBuf::from(cache_path);
let program_path = PathBuf::from(program_path);

@@ -378,7 +378,7 @@ async fn run(
// can be scheduled as a result of this function call, in case there are pending
// executions.
//
-// We could be eager in terms of reporting and plumb the result from the prepartion
+// We could be eager in terms of reporting and plumb the result from the preparation
// worker but we don't for the sake of simplicity.
break_if_fatal!(handle_prepare_done(
&cache_path,
@@ -1087,7 +1087,7 @@ mod tests {
// Received the precheck result.
assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Ok(()));

-// Send multiple requests for the same pvf.
+// Send multiple requests for the same PVF.
let mut precheck_receivers = Vec::new();
for _ in 0..3 {
let (result_tx, result_rx) = oneshot::channel();
@@ -1121,7 +1121,7 @@ mod tests {
let mut host = test.host_handle();

// Test mixed cases of receiving execute and precheck requests
-// for the same pvf.
+// for the same PVF.

// Send PVF for the execution and request the prechecking for it.
let (result_tx, result_rx_execute) = oneshot::channel();
6 changes: 3 additions & 3 deletions node/core/pvf/src/prepare/queue.rs
@@ -251,7 +251,7 @@ async fn handle_enqueue(queue: &mut Queue, priority: Priority, pvf: Pvf) -> Resu

if let Some(available) = find_idle_worker(queue) {
// This may seem not fair (w.r.t priority) on the first glance, but it should be. This is
-// because as soon as a worker finishes with the job it's immediatelly given the next one.
+// because as soon as a worker finishes with the job it's immediately given the next one.
assign(queue, available, job).await?;
} else {
spawn_extra_worker(queue, priority.is_critical()).await?;
@@ -335,7 +335,7 @@ async fn handle_worker_concluded(
match $expr {
Some(v) => v,
None => {
-// Precondition of calling this is that the $expr is never none;
+// Precondition of calling this is that the `$expr` is never none;
// Assume the conditions holds, then this never is not hit;
// qed.
never!("never_none, {}", stringify!($expr));
@@ -794,7 +794,7 @@ mod tests {
let w1 = test.workers.insert(());
test.send_from_pool(pool::FromPool::Spawned(w1));

-// Now, to the interesting part. After the queue normally issues the start_work command to
+// Now, to the interesting part. After the queue normally issues the `start_work` command to
// the pool, before receiving the command the queue may report that the worker ripped.
assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. });
test.send_from_pool(pool::FromPool::Rip(w1));
8 changes: 4 additions & 4 deletions node/core/pvf/src/worker_common.rs
@@ -236,7 +236,7 @@ impl WorkerHandle {
// We don't expect the bytes to be ever read. But in case we do, we should not use a buffer
// of a small size, because otherwise if the child process does return any data we will end up
// issuing a syscall for each byte. We also prefer not to do allocate that on the stack, since
-// each poll the buffer will be allocated and initialized (and that's due poll_read takes &mut [u8]
+// each poll the buffer will be allocated and initialized (and that's due `poll_read` takes &mut [u8]
// and there are no guarantees that a `poll_read` won't ever read from there even though that's
// unlikely).
//
@@ -259,7 +259,7 @@ impl futures::Future for WorkerHandle {
let me = self.project();
match futures::ready!(AsyncRead::poll_read(me.stdout, cx, &mut *me.drop_box)) {
Ok(0) => {
-// 0 means EOF means the child was terminated. Resolve.
+// 0 means `EOF` means the child was terminated. Resolve.
Poll::Ready(())
},
Ok(_bytes_read) => {
@@ -268,7 +268,7 @@
Poll::Pending
},
Err(_) => {
-// The implementation is guaranteed to not to return WouldBlock and Interrupted. This
+// The implementation is guaranteed to not to return `WouldBlock` and Interrupted. This
// leaves us with a legit errors which we suppose were due to termination.
Poll::Ready(())
},
@@ -284,7 +284,7 @@ impl fmt::Debug for WorkerHandle {

/// Convert the given path into a byte buffer.
pub fn path_to_bytes(path: &Path) -> &[u8] {
-// Ideally, we take the OsStr of the path, send that and reconstruct this on the other side.
+// Ideally, we take the `OsStr` of the path, send that and reconstruct this on the other side.
// However, libstd doesn't provide us with such an option. There are crates out there that
// allow for extraction of a path, but TBH it doesn't seem to be a real issue.
//
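
The truncated comment above concerns turning a `Path` into bytes before handing it to the worker. As a hedged sketch of one way such a helper can look when only UTF-8 paths are supported (an assumption for illustration; the real function in this file may differ):

```rust
use std::path::Path;

/// Convert the given path into a byte buffer.
///
/// Illustrative sketch only: libstd offers no portable `OsStr` -> bytes
/// round-trip, so fall back to UTF-8 and reject anything else.
pub fn path_to_bytes(path: &Path) -> &[u8] {
    path.to_str()
        .expect("non-UTF-8 paths are not supported")
        .as_bytes()
}

fn main() {
    let p = Path::new("/tmp/pvf-artifact");
    assert_eq!(path_to_bytes(p), "/tmp/pvf-artifact".as_bytes());
}
```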
2 changes: 1 addition & 1 deletion node/network/collator-protocol/src/collator_side/mod.rs
@@ -938,7 +938,7 @@ where
handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?;
},
NewGossipTopology(..) => {
-// impossibru!
+// impossible!
},
}

6 changes: 3 additions & 3 deletions node/network/collator-protocol/src/collator_side/tests.rs
@@ -311,7 +311,7 @@ async fn distribute_collation(
// whether or not we expect a connection request or not.
should_connect: bool,
) -> DistributeCollation {
-// Now we want to distribute a PoVBlock
+// Now we want to distribute a `PoVBlock`
let pov_block = PoV { block_data: BlockData(vec![42, 43, 44]) };

let pov_hash = pov_block.hash();
@@ -531,7 +531,7 @@ fn advertise_and_send_collation() {

// We declare to the connected validators that we are a collator.
// We need to catch all `Declare` messages to the validators we've
-// previosly connected to.
+// previously connected to.
for peer_id in test_state.current_group_validator_peer_ids() {
expect_declare_msg(&mut virtual_overseer, &test_state, &peer_id).await;
}
@@ -897,7 +897,7 @@ where

// We declare to the connected validators that we are a collator.
// We need to catch all `Declare` messages to the validators we've
// previosly connected to.
// previously connected to.
for peer_id in test_state.current_group_validator_peer_ids() {
expect_declare_msg(virtual_overseer, &test_state, &peer_id).await;
}
8 changes: 4 additions & 4 deletions node/network/collator-protocol/src/validator_side/mod.rs
@@ -1009,7 +1009,7 @@ where
state.metrics.note_collator_peer_count(state.peer_data.len());
},
NewGossipTopology(..) => {
-// impossibru!
+// impossible!
},
PeerViewChange(peer_id, view) => {
handle_peer_view_change(state, peer_id, view).await?;
@@ -1254,7 +1254,7 @@ async fn handle_collation_fetched_result<Context>(
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
// If no prior collation for this relay parent has been seconded, then
-// memoize the collation_event for that relay_parent, such that we may
+// memorize the `collation_event` for that `relay_parent`, such that we may
// notify the collator of their successful second backing
let relay_parent = collation_event.1.relay_parent;

@@ -1392,7 +1392,7 @@ async fn poll_collation_response(
"Fetching collation failed due to network error"
);
// A minor decrease in reputation for any network failure seems
-// sensible. In theory this could be exploited, by DoSing this node,
+// sensible. In theory this could be exploited, by Dosing this node,
// which would result in reduced reputation for proper nodes, but the
// same can happen for penalties on timeouts, which we also have.
CollationFetchResult::Error(COST_NETWORK_ERROR)
@@ -1406,7 +1406,7 @@ async fn poll_collation_response(
"Request timed out"
);
// A minor decrease in reputation for any network failure seems
-// sensible. In theory this could be exploited, by DoSing this node,
+// sensible. In theory this could be exploited, by Dosing this node,
// which would result in reduced reputation for proper nodes, but the
// same can happen for penalties on timeouts, which we also have.
CollationFetchResult::Error(COST_REQUEST_TIMED_OUT)
4 changes: 2 additions & 2 deletions node/network/collator-protocol/src/validator_side/tests.rs
@@ -494,7 +494,7 @@ fn collator_authentication_verification_works() {
// our view.
// - Collation protocol should request one PoV.
// - Collation protocol should disconnect both collators after having received the collation.
-// - The same collators plus an additional collator connect again and send povs for a different relay parent.
+// - The same collators plus an additional collator connect again and send `PoV`s for a different relay parent.
// - Collation protocol will request one PoV, but we will cancel it.
// - Collation protocol should request the second PoV which does not succeed in time.
// - Collation protocol should request third PoV.
@@ -697,7 +697,7 @@ fn reject_connection_to_next_group() {
&mut virtual_overseer,
peer_b.clone(),
test_state.collators[0].clone(),
-test_state.chain_ids[1].clone(), // next, not current para_id
+test_state.chain_ids[1].clone(), // next, not current `para_id`
)
.await;

2 changes: 1 addition & 1 deletion node/network/dispute-distribution/src/sender/mod.rs
@@ -269,7 +269,7 @@ impl DisputeSender {
// but I don't want to enable a bypass for the below smart constructor and this code path
// is supposed to be only hit on startup basically.
//
-// Revisit this decision when the `from_signed_statements` is unneded for the normal code
+// Revisit this decision when the `from_signed_statements` is unneeded for the normal code
// path as well.
let message = DisputeMessage::from_signed_statements(
valid_signed,
4 changes: 2 additions & 2 deletions node/network/dispute-distribution/src/tests/mod.rs
@@ -363,7 +363,7 @@ fn send_dispute_gets_cleaned_up() {
)
.await;

-// Yield, so subsystem can make progess:
+// Yield, so subsystem can make progress:
Delay::new(Duration::from_millis(2)).await;

conclude(&mut handle).await;
@@ -582,7 +582,7 @@ async fn conclude(handle: &mut TestSubsystemContextHandle<DisputeDistributionMes
poll_fn(|ctx| {
let fut = handle.recv();
pin_mut!(fut);
-// No requests should be inititated, as there is no longer any dispute active:
+// No requests should be initiated, as there is no longer any dispute active:
assert_matches!(fut.poll(ctx), Poll::Pending, "No requests expected");
Poll::Ready(())
})
4 changes: 2 additions & 2 deletions node/test/polkadot-simnet/common/src/lib.rs
@@ -159,7 +159,7 @@ where
})?
};

-// submit external_propose call through council collective
+// submit `external_propose` call through council collective
{
let external_propose = DemocracyCall::external_propose_majority {
proposal_hash: proposal_hash.clone().into(),
@@ -387,7 +387,7 @@ where
use structopt::StructOpt;

let tokio_runtime = build_runtime()?;
-// parse cli args
+// parse CLI args
let cmd = <polkadot_cli::Cli as StructOpt>::from_args();
// set up logging
let filters = cmd.run.base.log_filters()?;
4 changes: 2 additions & 2 deletions primitives/src/v1/signed.rs
@@ -150,7 +150,7 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> Signed<Payload, RealPa
}
}

-// We can't bound this on `Payload: Into<RealPayload>` beacuse that conversion consumes
+// We can't bound this on `Payload: Into<RealPayload>` because that conversion consumes
// the payload, and we don't want that. We can't bound it on `Payload: AsRef<RealPayload>`
// because there's no blanket impl of `AsRef<T> for T`. In the end, we just invent our
// own trait which does what we need: EncodeAs.
@@ -214,7 +214,7 @@ impl<Payload: EncodeAs<RealPayload>, RealPayload: Encode> UncheckedSigned<Payloa
}

fn payload_data<H: Encode>(payload: &Payload, context: &SigningContext<H>) -> Vec<u8> {
-// equivalent to (real_payload, context).encode()
+// equivalent to (`real_payload`, context).encode()
let mut out = payload.encode_as();
out.extend(context.encode());
out
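
The comment in `payload_data` above states that the manual encode-then-extend is equivalent to encoding the tuple. That is a general property of SCALE encoding, and the following self-contained check illustrates it with the `parity-scale-codec` crate; the payload and context types here are stand-ins assumed for the example.

```rust
use parity_scale_codec::Encode;

fn main() {
    // Stand-ins for the real payload and signing context (illustration only).
    let real_payload: (u32, Vec<u8>) = (42, vec![1, 2, 3]);
    let context: (u64, u8) = (7, 9);

    // Manual concatenation, mirroring the shape of `payload_data` above...
    let mut manual = real_payload.encode();
    manual.extend(context.encode());

    // ...is byte-for-byte the same as encoding the tuple, because SCALE encodes
    // a tuple as the concatenation of its fields' encodings.
    assert_eq!(manual, (&real_payload, &context).encode());
}
```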
8 changes: 4 additions & 4 deletions runtime/parachains/src/builder.rs
@@ -149,10 +149,10 @@ impl<T: paras_inherent::Config> BenchBuilder<T> {
/// Mock header.
pub(crate) fn header(block_number: T::BlockNumber) -> T::Header {
T::Header::new(
-block_number, // block_number,
-Default::default(), // extrinsics_root,
-Default::default(), // storage_root,
-Default::default(), // parent_hash,
+block_number, // `block_number`,
+Default::default(), // `extrinsics_root`,
+Default::default(), // `storage_root`,
+Default::default(), // `parent_hash`,
Default::default(), // digest,
)
}
10 changes: 5 additions & 5 deletions runtime/parachains/src/disputes.rs
@@ -602,7 +602,7 @@ impl StatementSetFilter {

// reverse order ensures correctness
for index in indices.into_iter().rev() {
-// swap_remove guarantees linear complexity.
+// `swap_remove` guarantees linear complexity.
statement_set.statements.swap_remove(index);
}

@@ -1579,7 +1579,7 @@ mod tests {
});
}

-// Test prunning works
+// Test pruning works
#[test]
fn test_initializer_on_new_session() {
let dispute_period = 3;
@@ -2244,7 +2244,7 @@ mod tests {
Pallet::<Test>::note_included(4, candidate_hash.clone(), 4);
assert_eq!(SpamSlots::<Test>::get(4), Some(vec![0, 0, 0, 0, 0, 0, 0]));

-// Ensure the reward_validator function was correctly called
+// Ensure the `reward_validator` function was correctly called
assert_eq!(
REWARD_VALIDATORS.with(|r| r.borrow().clone()),
vec![
@@ -2304,7 +2304,7 @@ mod tests {
assert_noop!(
{
Pallet::<Test>::revert_and_freeze(0);
-Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop.
+Result::<(), ()>::Err(()) // Just a small trick in order to use `assert_noop`.
},
(),
);
@@ -2325,7 +2325,7 @@ mod tests {
assert_noop!(
{
Pallet::<Test>::revert_and_freeze(10);
-Result::<(), ()>::Err(()) // Just a small trick in order to use assert_noop.
+Result::<(), ()>::Err(()) // Just a small trick in order to use `assert_noop`.
},
(),
);
4 changes: 2 additions & 2 deletions runtime/parachains/src/hrmp.rs
@@ -1964,7 +1964,7 @@ mod tests {
assert_eq!(<Test as Config>::Currency::free_balance(&para_a.into_account()), 80);

// Then deregister one parachain, but don't wait two sessions until it takes effect.
-// Instead, para_b will confirm the request, which will take place the same time
+// Instead, `para_b` will confirm the request, which will take place the same time
// the offboarding should happen.
deregister_parachain(para_a);
run_to_block(9, Some(vec![9]));
@@ -1973,7 +1973,7 @@ mod tests {
assert!(!channel_exists(para_a, para_b));
run_to_block(10, Some(vec![10]));

-// The outcome we expect is para_b should receive the refund.
+// The outcome we expect is `para_b` should receive the refund.
assert_eq!(<Test as Config>::Currency::free_balance(&para_b.into_account()), 110);
assert!(!channel_exists(para_a, para_b));
assert_storage_consistency_exhaustive();
