Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Use same fmt and clippy configs as in Substrate #7611

Merged
merged 8 commits into from
Aug 14, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .cargo/config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -29,4 +29,5 @@ rustflags = [
"-Aclippy::needless_option_as_deref", # false positives
"-Aclippy::derivable_impls", # false positives
"-Aclippy::stable_sort_primitive", # prefer stable sort
"-Aclippy::extra-unused-type-parameters", # stylistic
]
4 changes: 2 additions & 2 deletions cli/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -130,8 +130,8 @@ pub struct RunCmd {
pub overseer_channel_capacity_override: Option<usize>,

/// Path to the directory where auxiliary worker binaries reside. If not specified, the main
/// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY: if
/// the path points to an executable rather than directory, that executable is used both as
/// binary's directory is searched first, then `/usr/lib/polkadot` is searched. TESTING ONLY:
/// if the path points to an executable rather than directory, that executable is used both as
/// preparation and execution worker.
#[arg(long, value_name = "PATH")]
pub workers_path: Option<PathBuf>,
Expand Down
4 changes: 2 additions & 2 deletions cli/src/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,8 @@ impl SubstrateCli for Cli {
let chain_spec = Box::new(service::PolkadotChainSpec::from_json_file(path.clone())?)
as Box<dyn service::ChainSpec>;

// When `force_*` is given or the file name starts with the name of one of the known chains,
// we use the chain spec for the specific chain.
// When `force_*` is given or the file name starts with the name of one of the known
// chains, we use the chain spec for the specific chain.
if self.run.force_rococo ||
chain_spec.is_rococo() ||
chain_spec.is_wococo() ||
Expand Down
12 changes: 6 additions & 6 deletions core-primitives/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,10 +91,10 @@ impl sp_std::fmt::Debug for CandidateHash {
pub type Nonce = u32;

/// The balance of an account.
/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution
/// to allow for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit
/// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier)
/// for an eventual total of `10^27` units (27 significant decimal figures).
/// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a
/// resolution to allow for one second's worth of an annualised 50% reward be paid to a unit holder
/// (`10^11` unit denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years
/// (`10^9` multiplier) for an eventual total of `10^27` units (27 significant decimal figures).
/// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so
/// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow.
pub type Balance = u128;
Expand All @@ -121,8 +121,8 @@ pub type Remark = [u8; 32];
/// The size of the message is limited by the `config.max_downward_message_size` parameter.
pub type DownwardMessage = sp_std::vec::Vec<u8>;

/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when
/// the message was sent.
/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number
/// when the message was sent.
#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)]
pub struct InboundDownwardMessage<BlockNumber = crate::BlockNumber> {
/// The block number at which these messages were put into the downward message queue.
Expand Down
22 changes: 13 additions & 9 deletions node/collation-generation/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@
//!
//! * If there is no collation generation config, ignore.
//! * Otherwise, for each `activated` head in the update:
//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime API.
//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime
//! API.
//! * Use the Runtime API subsystem to fetch the full validation data.
//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with the configuration's `key`.
//! * Invoke the `collator`, and use its outputs to produce a [`CandidateReceipt`], signed with
//! the configuration's `key`.
//! * Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`.

#![deny(missing_docs)]
Expand Down Expand Up @@ -77,8 +79,8 @@ impl CollationGenerationSubsystem {
/// Conceptually, this is very simple: it just loops forever.
///
/// - On incoming overseer messages, it starts or stops jobs as appropriate.
/// - On other incoming messages, if they can be converted into `Job::ToJob` and
/// include a hash, then they're forwarded to the appropriate individual job.
/// - On other incoming messages, if they can be converted into `Job::ToJob` and include a hash,
/// then they're forwarded to the appropriate individual job.
/// - On outgoing messages from the jobs, it forwards them to the overseer.
///
/// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
Expand Down Expand Up @@ -109,9 +111,10 @@ impl CollationGenerationSubsystem {
}

// handle an incoming message. return true if we should break afterwards.
// note: this doesn't strictly need to be a separate function; it's more an administrative function
// so that we don't clutter the run loop. It could in principle be inlined directly into there.
// it should hopefully therefore be ok that it's an async function mutably borrowing self.
// note: this doesn't strictly need to be a separate function; it's more an administrative
// function so that we don't clutter the run loop. It could in principle be inlined directly
// into there. it should hopefully therefore be ok that it's an async function mutably borrowing
// self.
async fn handle_incoming<Context>(
&mut self,
incoming: SubsystemResult<FromOrchestra<<Context as SubsystemContext>::Message>>,
Expand Down Expand Up @@ -319,8 +322,9 @@ async fn handle_new_activations<Context>(
// As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures
// that honest collators never produce a PoV which is uncompressed.
//
// As such, honest collators never produce an uncompressed PoV which starts with
// a compression magic number, which would lead validators to reject the collation.
// As such, honest collators never produce an uncompressed PoV which starts
// with a compression magic number, which would lead validators to reject
// the collation.
if encoded_size > validation_data.max_pov_size as usize {
gum::debug!(
target: LOG_TARGET,
Expand Down
10 changes: 5 additions & 5 deletions node/collation-generation/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -203,9 +203,9 @@ mod handle_new_activations {
.into_inner();

// the only activated hash should be from the 4 hash:
// each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5
// given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4
// hash.
// each activated hash generates two scheduled cores: one with its value * 4, one with its
// value * 5 given that the test configuration has a `para_id` of 16, there's only one way
// to get that value: with the 4 hash.
assert_eq!(requested_validation_data, vec![[4; 32].into()]);
}

Expand Down Expand Up @@ -301,8 +301,8 @@ mod handle_new_activations {
.into_inner();

// we expect a single message to be sent, containing a candidate receipt.
// we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the
// correct descriptor
// we don't care too much about the `commitments_hash` right now, but let's ensure that
// we've calculated the correct descriptor
let expect_pov_hash =
test_collation_compressed().proof_of_validity.into_compressed().hash();
let expect_validation_data_hash = test_validation_data().hash();
Expand Down
8 changes: 4 additions & 4 deletions node/core/approval-voting/src/approval_checking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@ pub enum RequiredTranches {
/// assignments that are before the local time.
maximum_broadcast: DelayTranche,
/// The clock drift, in ticks, to apply to the local clock when determining whether
/// to broadcast an assignment or when to schedule a wakeup. The local clock should be treated
/// as though it is `clock_drift` ticks earlier.
/// to broadcast an assignment or when to schedule a wakeup. The local clock should be
/// treated as though it is `clock_drift` ticks earlier.
clock_drift: Tick,
},
/// An exact number of required tranches and a number of no-shows. This indicates that
Expand All @@ -55,8 +55,8 @@ pub enum RequiredTranches {
/// The amount of missing votes that should be tolerated.
tolerated_missing: usize,
/// When the next no-show would be, if any. This is used to schedule the next wakeup in the
/// event that there are some assignments that don't have corresponding approval votes. If this
/// is `None`, all assignments have approvals.
/// event that there are some assignments that don't have corresponding approval votes. If
/// this is `None`, all assignments have approvals.
next_no_show: Option<Tick>,
/// The last tick at which a needed assignment was received.
last_assignment_tick: Option<Tick>,
Expand Down
15 changes: 8 additions & 7 deletions node/core/approval-voting/src/criteria.rs
Original file line number Diff line number Diff line change
Expand Up @@ -218,13 +218,14 @@ impl AssignmentCriteria for RealAssignmentCriteria {
}

/// Compute the assignments for a given block. Returns a map containing all assignments to cores in
/// the block. If more than one assignment targets the given core, only the earliest assignment is kept.
/// the block. If more than one assignment targets the given core, only the earliest assignment is
/// kept.
///
/// The `leaving_cores` parameter indicates all cores within the block where a candidate was included,
/// as well as the group index backing those.
/// The `leaving_cores` parameter indicates all cores within the block where a candidate was
/// included, as well as the group index backing those.
///
/// The current description of the protocol assigns every validator to check every core. But at different times.
/// The idea is that most assignments are never triggered and fall by the wayside.
/// The current description of the protocol assigns every validator to check every core. But at
/// different times. The idea is that most assignments are never triggered and fall by the wayside.
///
/// This will not assign to anything the local validator was part of the backing group for.
pub(crate) fn compute_assignments(
Expand Down Expand Up @@ -463,8 +464,8 @@ pub(crate) enum InvalidAssignmentReason {
/// * Sample is out of bounds
/// * Validator is present in backing group.
///
/// This function does not check whether the core is actually a valid assignment or not. That should be done
/// outside the scope of this function.
/// This function does not check whether the core is actually a valid assignment or not. That should
/// be done outside the scope of this function.
pub(crate) fn check_assignment_cert(
claimed_core_index: CoreIndex,
validator_index: ValidatorIndex,
Expand Down
28 changes: 15 additions & 13 deletions node/core/approval-voting/src/import.rs
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,8 @@ enum ImportedBlockInfoError {
VrfInfoUnavailable,
}

/// Computes information about the imported block. Returns an error if the info couldn't be extracted.
/// Computes information about the imported block. Returns an error if the info couldn't be
/// extracted.
#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
async fn imported_block_info<Context>(
ctx: &mut Context,
Expand Down Expand Up @@ -181,20 +182,21 @@ async fn imported_block_info<Context>(
// It's not obvious whether to use the hash or the parent hash for this, intuitively. We
// want to use the block hash itself, and here's why:
//
// First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology from
// the paper, which we fulfill using 'session's, which are a Substrate consensus concept.
// First off, 'epoch' in BABE means 'session' in other places. 'epoch' is the terminology
// from the paper, which we fulfill using 'session's, which are a Substrate consensus
// concept.
//
// In BABE, the on-chain and off-chain view of the current epoch can differ at epoch boundaries
// because epochs change precisely at a slot. When a block triggers a new epoch, the state of
// its parent will still have the old epoch. Conversely, we have the invariant that every
// block in BABE has the epoch _it was authored in_ within its post-state. So we use the
// block, and not its parent.
// In BABE, the on-chain and off-chain view of the current epoch can differ at epoch
// boundaries because epochs change precisely at a slot. When a block triggers a new epoch,
// the state of its parent will still have the old epoch. Conversely, we have the invariant
// that every block in BABE has the epoch _it was authored in_ within its post-state. So we
// use the block, and not its parent.
//
// It's worth noting that Polkadot session changes, at least for the purposes of parachains,
// would function the same way, except for the fact that they're always delayed by one block.
// This gives us the opposite invariant for sessions - the parent block's post-state gives
// us the canonical information about the session index for any of its children, regardless
// of which slot number they might be produced at.
// It's worth noting that Polkadot session changes, at least for the purposes of
// parachains, would function the same way, except for the fact that they're always delayed
// by one block. This gives us the opposite invariant for sessions - the parent block's
// post-state gives us the canonical information about the session index for any of its
// children, regardless of which slot number they might be produced at.
ctx.send_message(RuntimeApiMessage::Request(
block_hash,
RuntimeApiRequest::CurrentBabeEpoch(s_tx),
Expand Down
35 changes: 20 additions & 15 deletions node/core/approval-voting/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1232,8 +1232,8 @@ async fn handle_from_overseer<Context>(
);

// Our first wakeup will just be the tranche of our assignment,
// if any. This will likely be superseded by incoming assignments
// and approvals which trigger rescheduling.
// if any. This will likely be superseded by incoming
// assignments and approvals which trigger rescheduling.
actions.push(Action::ScheduleWakeup {
block_hash: block_batch.block_hash,
block_number: block_batch.block_number,
Expand All @@ -1256,12 +1256,14 @@ async fn handle_from_overseer<Context>(
crate::ops::canonicalize(db, block_number, block_hash)
.map_err(|e| SubsystemError::with_origin("db", e))?;

// `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly.
// `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
// accordingly.
wakeups.prune_finalized_wakeups(block_number, &mut state.spans);

// // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans accordingly.
// let hash_set = wakeups.block_numbers.values().flatten().collect::<HashSet<_>>();
// state.spans.retain(|hash, _| hash_set.contains(hash));
// // `prune_finalized_wakeups` prunes all finalized block hashes. We prune spans
// accordingly. let hash_set =
// wakeups.block_numbers.values().flatten().collect::<HashSet<_>>(); state.spans.
// retain(|hash, _| hash_set.contains(hash));

Vec::new()
},
Expand Down Expand Up @@ -1403,8 +1405,8 @@ async fn get_approval_signatures_for_candidate<Context>(
tx_distribution,
));

// Because of the unbounded sending and the nature of the call (just fetching data from state),
// this should not block long:
// Because of the unbounded sending and the nature of the call (just fetching data from
// state), this should not block long:
match rx_distribution.timeout(WAIT_FOR_SIGS_TIMEOUT).await {
None => {
gum::warn!(
Expand Down Expand Up @@ -2117,9 +2119,10 @@ impl ApprovalStateTransition {
}
}

// Advance the approval state, either by importing an approval vote which is already checked to be valid and corresponding to an assigned
// validator on the candidate and block, or by noting that there are no further wakeups or tranches needed. This updates the block entry and candidate entry as
// necessary and schedules any further wakeups.
// Advance the approval state, either by importing an approval vote which is already checked to be
// valid and corresponding to an assigned validator on the candidate and block, or by noting that
// there are no further wakeups or tranches needed. This updates the block entry and candidate entry
// as necessary and schedules any further wakeups.
async fn advance_approval_state<Sender>(
sender: &mut Sender,
state: &State,
Expand Down Expand Up @@ -2251,7 +2254,8 @@ where
// 1. This is not a local approval, as we don't store anything new in the approval entry.
// 2. The candidate is not newly approved, as we haven't altered the approval entry's
// approved flag with `mark_approved` above.
// 3. The approver, if any, had already approved the candidate, as we haven't altered the bitfield.
// 3. The approver, if any, had already approved the candidate, as we haven't altered the
// bitfield.
if transition.is_local_approval() || newly_approved || !already_approved_by.unwrap_or(true)
{
// In all other cases, we need to write the candidate entry.
Expand Down Expand Up @@ -2279,7 +2283,8 @@ fn should_trigger_assignment(
&approval_entry,
RequiredTranches::All,
)
.is_approved(Tick::max_value()), // when all are required, we are just waiting for the first 1/3+
// when all are required, we are just waiting for the first 1/3+
.is_approved(Tick::max_value()),
RequiredTranches::Pending { maximum_broadcast, clock_drift, .. } => {
let drifted_tranche_now =
tranche_now.saturating_sub(clock_drift as DelayTranche);
Expand Down Expand Up @@ -2615,8 +2620,8 @@ async fn launch_approval<Context>(
match val_rx.await {
Err(_) => return ApprovalState::failed(validator_index, candidate_hash),
Ok(Ok(ValidationResult::Valid(_, _))) => {
// Validation checked out. Issue an approval command. If the underlying service is unreachable,
// then there isn't anything we can do.
// Validation checked out. Issue an approval command. If the underlying service is
// unreachable, then there isn't anything we can do.

gum::trace!(target: LOG_TARGET, ?candidate_hash, ?para_id, "Candidate Valid");

Expand Down
7 changes: 4 additions & 3 deletions node/core/approval-voting/src/ops.rs
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,8 @@ pub fn canonicalize(
}
}

// Update all blocks-at-height keys, deleting all those which now have empty `block_assignments`.
// Update all blocks-at-height keys, deleting all those which now have empty
// `block_assignments`.
for (h, at) in visited_heights.into_iter() {
if at.is_empty() {
overlay_db.delete_blocks_at_height(h);
Expand All @@ -170,8 +171,8 @@ pub fn canonicalize(
}
}

// due to the fork pruning, this range actually might go too far above where our actual highest block is,
// if a relatively short fork is canonicalized.
// due to the fork pruning, this range actually might go too far above where our actual highest
// block is, if a relatively short fork is canonicalized.
// TODO https://github.com/paritytech/polkadot/issues/3389
let new_range = StoredBlockRange(canon_number + 1, std::cmp::max(range.1, canon_number + 2));

Expand Down
Loading