diff --git a/Cargo.lock b/Cargo.lock index 04e01285..7882cf3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10514,7 +10514,6 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.43#5e49f6e44820affccaf517fd22af564f4b495d40" dependencies = [ "ahash 0.8.3", "array-bytes 4.2.0", diff --git a/Cargo.toml b/Cargo.toml index 2087f663..b05be325 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ exclude = [ "vendor/ring", "polkadot/node/service", "substrate/client/transaction-pool", + "substrate/client/consensus/grandpa", ] members = [ @@ -60,3 +61,5 @@ polkadot-service = { path = "polkadot/node/service" } # Remove after 0.9.44 sc-transaction-pool = { path = "substrate/client/transaction-pool" } sc-transaction-pool-api = { path = "substrate/client/transaction-pool/api" } +# Remove after 0.9.45 +sc-consensus-grandpa = { path = "substrate/client/consensus/grandpa" } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index c9e5518e..e8aec88b 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -130,6 +130,10 @@ pub use {rococo_runtime, rococo_runtime_constants}; #[cfg(feature = "westend-native")] pub use {westend_runtime, westend_runtime_constants}; +/// The minimum period of blocks on which justifications will be +/// imported and generated. +const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; + /// Provides the header and block number for a hash. /// /// Decouples `sc_client_api::Backend` and `sp_blockchain::HeaderBackend`. @@ -511,6 +515,7 @@ where let (grandpa_block_import, grandpa_link) = grandpa::block_import_with_authority_set_hard_forks( client.clone(), + GRANDPA_JUSTIFICATION_PERIOD, &(client.clone() as Arc<_>), select_chain.clone(), grandpa_hard_forks, @@ -1186,7 +1191,7 @@ where // Grandpa performance can be improved a bit by tuning this parameter, see: // https://github.com/paritytech/polkadot/issues/5464 gossip_duration: Duration::from_millis(1000), - justification_period: 1, + justification_generation_period: 1, name: Some(name), observer_enabled: false, keystore: keystore_opt, diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml new file mode 100644 index 00000000..c7a0ab97 --- /dev/null +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "sc-consensus-grandpa" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Integration of the GRANDPA finality gadget into substrate." 
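The service change above introduces a `GRANDPA_JUSTIFICATION_PERIOD` of 512 blocks, threads it into `block_import_with_authority_set_hard_forks`, and renames the voter config field from `justification_period` to `justification_generation_period`. The sketch below only illustrates what such a minimum period means; the helper name, its parameters and the exact rule are assumptions for illustration, not the crate's actual logic (independently of the period, blocks that enact an authority set change always carry a justification so that light clients can follow the handoff).

/// Hypothetical helper: decide whether a newly finalized block should get a
/// persisted justification, given a minimum generation period.
fn should_generate_justification(
    finalized_number: u32,
    last_justified_number: u32,
    enacts_set_change: bool,
    period: u32, // e.g. GRANDPA_JUSTIFICATION_PERIOD = 512 in the service above
) -> bool {
    // Always justify handoff blocks; otherwise only once at least `period`
    // blocks have been finalized since the last stored justification.
    enacts_set_change || finalized_number.saturating_sub(last_justified_number) >= period
}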
+documentation = "https://docs.rs/sc-consensus-grandpa" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +ahash = "0.8.2" +array-bytes = "4.1" +async-trait = "0.1.57" +dyn-clone = "1.0" +finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } +futures = "0.3.21" +futures-timer = "3.0.1" +log = "0.4.17" +parity-scale-codec = { version = "3.2.2", features = ["derive"] } +parking_lot = "0.12.1" +rand = "0.8.5" +serde_json = "1.0.85" +thiserror = "1.0" +fork-tree = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-network-gossip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-network-common = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-consensus-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } + +[dev-dependencies] +assert_matches = "1.3.0" +finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } +serde = "1.0.136" +tokio = "1.22.0" +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sc-network-test = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +sp-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } +substrate-test-runtime-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43" } diff --git a/substrate/client/consensus/grandpa/README.md b/substrate/client/consensus/grandpa/README.md new file mode 100644 index 00000000..64a7e70b --- /dev/null +++ b/substrate/client/consensus/grandpa/README.md @@ -0,0 +1,39 @@ +Integration of the GRANDPA finality gadget into substrate. 
+ +This crate is unstable and the API and usage may change. + +This crate provides a long-running future that produces finality notifications. + +# Usage + +First, create a block-import wrapper with the `block_import` function. The +GRANDPA worker needs to be linked together with this block import object, so +a `LinkHalf` is returned as well. All blocks imported (from network or +consensus or otherwise) must pass through this wrapper, otherwise consensus +is likely to break in unexpected ways. + +Next, use the `LinkHalf` and a local configuration to `run_grandpa_voter`. +This requires a `Network` implementation. The returned future should be +driven to completion and will finalize blocks in the background. + +# Changing authority sets + +The rough idea behind changing authority sets in GRANDPA is that at some point, +we obtain agreement for some maximum block height that the current set can +finalize, and once a block with that height is finalized the next set will +pick up finalization from there. + +Technically speaking, this would be implemented as a voting rule which says, +"if there is a signal for a change in N blocks in block B, only vote on +chains with length NUM(B) + N if they contain B". This conditional-inclusion +logic is complex to compute because it requires looking arbitrarily far +back in the chain. + +Instead, we keep track of a list of all signals we've seen so far (across +all forks), sorted ascending by the block number they would be applied at. +We never vote on chains with number higher than the earliest handoff block +number (this is num(signal) + N). When finalizing a block, we either apply +or prune any signaled changes based on whether the signaling block is +included in the newly-finalized chain. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/substrate/client/consensus/grandpa/src/authorities.rs b/substrate/client/consensus/grandpa/src/authorities.rs new file mode 100644 index 00000000..623223e4 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/authorities.rs @@ -0,0 +1,1752 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Utilities for dealing with authorities, authority sets, and handoffs. + +use std::{cmp::Ord, fmt::Debug, ops::Add}; + +use finality_grandpa::voter_set::VoterSet; +use fork_tree::{FilterAction, ForkTree}; +use log::debug; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::MappedMutexGuard; +use sc_consensus::shared_data::{SharedData, SharedDataLocked}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; +use sp_consensus_grandpa::{AuthorityId, AuthorityList}; + +use crate::{SetId, LOG_TARGET}; + +/// Error type returned on operations on the `AuthoritySet`. 
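// A sketch (editorial, not from the original sources) of the rule described in
// the README's "Changing authority sets" section: never vote on chains past the
// earliest pending handoff, i.e. min(signal_height + delay) over all pending
// standard changes. `Handoff` and `vote_limit` are hypothetical names; in this
// file the same idea is implemented by `PendingChange::effective_number` and
// `AuthoritySet::current_limit`.
//
//     struct Handoff {
//         signal_height: u32, // block where the change was signalled
//         delay: u32,         // blocks until it takes effect
//     }
//
//     /// Highest block number the current set may vote on; `None` = no limit.
//     fn vote_limit(pending: &[Handoff]) -> Option<u32> {
//         pending.iter().map(|h| h.signal_height + h.delay).min()
//     }
//
// With signals at #5 (delay 10) and #20 (delay 0), the limit is #15: the voter
// must not vote for blocks with number higher than 15 until that change has
// been enacted.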
+#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Invalid authority set, either empty or with an authority weight set to 0.")] + InvalidAuthoritySet, + #[error("Client error during ancestry lookup: {0}")] + Client(E), + #[error("Duplicate authority set change.")] + DuplicateAuthoritySetChange, + #[error("Multiple pending forced authority set changes are not allowed.")] + MultiplePendingForcedAuthoritySetChanges, + #[error( + "A pending forced authority set change could not be applied since it must be applied \ + after the pending standard change at #{0}" + )] + ForcedAuthoritySetChangeDependencyUnsatisfied(N), + #[error("Invalid operation in the pending changes tree: {0}")] + ForkTree(fork_tree::Error), +} + +impl From> for Error { + fn from(err: fork_tree::Error) -> Error { + match err { + fork_tree::Error::Client(err) => Error::Client(err), + fork_tree::Error::Duplicate => Error::DuplicateAuthoritySetChange, + err => Error::ForkTree(err), + } + } +} + +impl From for Error { + fn from(err: E) -> Error { + Error::Client(err) + } +} + +/// A shared authority set. +pub struct SharedAuthoritySet { + inner: SharedData>, +} + +impl Clone for SharedAuthoritySet { + fn clone(&self) -> Self { + SharedAuthoritySet { inner: self.inner.clone() } + } +} + +impl SharedAuthoritySet { + /// Returns access to the [`AuthoritySet`]. + pub(crate) fn inner(&self) -> MappedMutexGuard> { + self.inner.shared_data() + } + + /// Returns access to the [`AuthoritySet`] and locks it. + /// + /// For more information see [`SharedDataLocked`]. + pub(crate) fn inner_locked(&self) -> SharedDataLocked> { + self.inner.shared_data_locked() + } +} + +impl SharedAuthoritySet +where + N: Add + Ord + Clone + Debug, + H: Clone + Debug, +{ + /// Get the earliest limit-block number that's higher or equal to the given + /// min number, if any. + pub(crate) fn current_limit(&self, min: N) -> Option { + self.inner().current_limit(min) + } + + /// Get the current set ID. This is incremented every time the set changes. + pub fn set_id(&self) -> u64 { + self.inner().set_id + } + + /// Get the current authorities and their weights (for the current set ID). + pub fn current_authorities(&self) -> VoterSet { + VoterSet::new(self.inner().current_authorities.iter().cloned()).expect( + "current_authorities is non-empty and weights are non-zero; \ + constructor and all mutating operations on `AuthoritySet` ensure this; \ + qed.", + ) + } + + /// Clone the inner `AuthoritySet`. + pub fn clone_inner(&self) -> AuthoritySet { + self.inner().clone() + } + + /// Clone the inner `AuthoritySetChanges`. + pub fn authority_set_changes(&self) -> AuthoritySetChanges { + self.inner().authority_set_changes.clone() + } +} + +impl From> for SharedAuthoritySet { + fn from(set: AuthoritySet) -> Self { + SharedAuthoritySet { inner: SharedData::new(set) } + } +} + +/// Status of the set after changes were applied. +#[derive(Debug)] +pub(crate) struct Status { + /// Whether internal changes were made. + pub(crate) changed: bool, + /// `Some` when underlying authority set has changed, containing the + /// block where that set changed. + pub(crate) new_set_block: Option<(H, N)>, +} + +/// A set of authorities. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub struct AuthoritySet { + /// The current active authorities. + pub(crate) current_authorities: AuthorityList, + /// The current set id. + pub(crate) set_id: u64, + /// Tree of pending standard changes across forks. Standard changes are + /// enacted on finality and must be enacted (i.e. 
finalized) in-order across + /// a given branch + pub(crate) pending_standard_changes: ForkTree>, + /// Pending forced changes across different forks (at most one per fork). + /// Forced changes are enacted on block depth (not finality), for this + /// reason only one forced change should exist per fork. When trying to + /// apply forced changes we keep track of any pending standard changes that + /// they may depend on, this is done by making sure that any pending change + /// that is an ancestor of the forced changed and its effective block number + /// is lower than the last finalized block (as signaled in the forced + /// change) must be applied beforehand. + pending_forced_changes: Vec>, + /// Track at which blocks the set id changed. This is useful when we need to prove finality for + /// a given block since we can figure out what set the block belongs to and when the set + /// started/ended. + pub(crate) authority_set_changes: AuthoritySetChanges, +} + +impl AuthoritySet +where + H: PartialEq, + N: Ord + Clone, +{ + // authority sets must be non-empty and all weights must be greater than 0 + fn invalid_authority_list(authorities: &AuthorityList) -> bool { + authorities.is_empty() || authorities.iter().any(|(_, w)| *w == 0) + } + + /// Get a genesis set with given authorities. + pub(crate) fn genesis(initial: AuthorityList) -> Option { + if Self::invalid_authority_list(&initial) { + return None + } + + Some(AuthoritySet { + current_authorities: initial, + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }) + } + + /// Create a new authority set. + pub(crate) fn new( + authorities: AuthorityList, + set_id: u64, + pending_standard_changes: ForkTree>, + pending_forced_changes: Vec>, + authority_set_changes: AuthoritySetChanges, + ) -> Option { + if Self::invalid_authority_list(&authorities) { + return None + } + + Some(AuthoritySet { + current_authorities: authorities, + set_id, + pending_standard_changes, + pending_forced_changes, + authority_set_changes, + }) + } + + /// Get the current set id and a reference to the current authority set. + pub(crate) fn current(&self) -> (u64, &[(AuthorityId, u64)]) { + (self.set_id, &self.current_authorities[..]) + } + + /// Revert to a specified block given its `hash` and `number`. + /// This removes all the authority set changes that were announced after + /// the revert point. + /// Revert point is identified by `number` and `hash`. + pub(crate) fn revert(&mut self, hash: H, number: N, is_descendent_of: &F) + where + F: Fn(&H, &H) -> Result, + { + let filter = |node_hash: &H, node_num: &N, _: &PendingChange| { + if number >= *node_num && + (is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash) + { + // Continue the search in this subtree. + FilterAction::KeepNode + } else if number < *node_num && is_descendent_of(&hash, node_hash).unwrap_or_default() { + // Found a node to be removed. + FilterAction::Remove + } else { + // Not a parent or child of the one we're looking for, stop processing this branch. + FilterAction::KeepTree + } + }; + + // Remove standard changes. + let _ = self.pending_standard_changes.drain_filter(&filter); + + // Remove forced changes. 
+ self.pending_forced_changes + .retain(|change| !is_descendent_of(&hash, &change.canon_hash).unwrap_or_default()); + } +} + +impl AuthoritySet +where + N: Add + Ord + Clone + Debug, + H: Clone + Debug, +{ + /// Returns the block hash and height at which the next pending change in + /// the given chain (i.e. it includes `best_hash`) was signalled, `None` if + /// there are no pending changes for the given chain. + /// + /// This is useful since we know that when a change is signalled the + /// underlying runtime authority set management module (e.g. session module) + /// has updated its internal state (e.g. a new session started). + pub(crate) fn next_change( + &self, + best_hash: &H, + is_descendent_of: &F, + ) -> Result, Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let mut forced = None; + for change in &self.pending_forced_changes { + if is_descendent_of(&change.canon_hash, best_hash)? { + forced = Some((change.canon_hash.clone(), change.canon_height.clone())); + break + } + } + + let mut standard = None; + for (_, _, change) in self.pending_standard_changes.roots() { + if is_descendent_of(&change.canon_hash, best_hash)? { + standard = Some((change.canon_hash.clone(), change.canon_height.clone())); + break + } + } + + let earliest = match (forced, standard) { + (Some(forced), Some(standard)) => + Some(if forced.1 < standard.1 { forced } else { standard }), + (Some(forced), None) => Some(forced), + (None, Some(standard)) => Some(standard), + (None, None) => None, + }; + + Ok(earliest) + } + + fn add_standard_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let hash = pending.canon_hash.clone(); + let number = pending.canon_height.clone(); + + debug!( + target: LOG_TARGET, + "Inserting potential standard set change signaled at block {:?} (delayed by {:?} blocks).", + (&number, &hash), + pending.delay, + ); + + self.pending_standard_changes.import(hash, number, pending, is_descendent_of)?; + + debug!( + target: LOG_TARGET, + "There are now {} alternatives for the next pending standard change (roots), and a \ + total of {} pending standard changes (across all forks).", + self.pending_standard_changes.roots().count(), + self.pending_standard_changes.iter().count(), + ); + + Ok(()) + } + + fn add_forced_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + for change in &self.pending_forced_changes { + if change.canon_hash == pending.canon_hash { + return Err(Error::DuplicateAuthoritySetChange) + } + + if is_descendent_of(&change.canon_hash, &pending.canon_hash)? { + return Err(Error::MultiplePendingForcedAuthoritySetChanges) + } + } + + // ordered first by effective number and then by signal-block number. 
+ let key = (pending.effective_number(), pending.canon_height.clone()); + let idx = self + .pending_forced_changes + .binary_search_by_key(&key, |change| { + (change.effective_number(), change.canon_height.clone()) + }) + .unwrap_or_else(|i| i); + + debug!( + target: LOG_TARGET, + "Inserting potential forced set change at block {:?} (delayed by {:?} blocks).", + (&pending.canon_height, &pending.canon_hash), + pending.delay, + ); + + self.pending_forced_changes.insert(idx, pending); + + debug!( + target: LOG_TARGET, + "There are now {} pending forced changes.", + self.pending_forced_changes.len() + ); + + Ok(()) + } + + /// Note an upcoming pending transition. Multiple pending standard changes + /// on the same branch can be added as long as they don't overlap. Forced + /// changes are restricted to one per fork. This method assumes that changes + /// on the same branch will be added in-order. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + pub(crate) fn add_pending_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + if Self::invalid_authority_list(&pending.next_authorities) { + return Err(Error::InvalidAuthoritySet) + } + + match pending.delay_kind { + DelayKind::Best { .. } => self.add_forced_change(pending, is_descendent_of), + DelayKind::Finalized => self.add_standard_change(pending, is_descendent_of), + } + } + + /// Inspect pending changes. Standard pending changes are iterated first, + /// and the changes in the tree are traversed in pre-order, afterwards all + /// forced changes are iterated. + pub(crate) fn pending_changes(&self) -> impl Iterator> { + self.pending_standard_changes + .iter() + .map(|(_, _, c)| c) + .chain(self.pending_forced_changes.iter()) + } + + /// Get the earliest limit-block number, if any. If there are pending changes across + /// different forks, this method will return the earliest effective number (across the + /// different branches) that is higher or equal to the given min number. + /// + /// Only standard changes are taken into account for the current + /// limit, since any existing forced change should preclude the voter from voting. + pub(crate) fn current_limit(&self, min: N) -> Option { + self.pending_standard_changes + .roots() + .filter(|&(_, _, c)| c.effective_number() >= min) + .min_by_key(|&(_, _, c)| c.effective_number()) + .map(|(_, _, c)| c.effective_number()) + } + + /// Apply or prune any pending transitions based on a best-block trigger. + /// + /// Returns `Ok((median, new_set))` when a forced change has occurred. The + /// median represents the median last finalized block at the time the change + /// was signaled, and it should be used as the canon block when starting the + /// new grandpa voter. Only alters the internal state in this case. + /// + /// These transitions are always forced and do not lead to justifications + /// which light clients can follow. + /// + /// Forced changes can only be applied after all pending standard changes + /// that it depends on have been applied. If any pending standard change + /// exists that is an ancestor of a given forced changed and which effective + /// block number is lower than the last finalized block (as defined by the + /// forced change), then the forced change cannot be applied. An error will + /// be returned in that case which will prevent block import. 
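// Worked example (editorial, mirroring the `forced_changes_blocked_by_standard_changes`
// test further down): with pending standard changes effective at #15, #20 and #35
// on a fork, a forced change signalled at #40 with delay 5 (effective at #45) that
// carries `median_last_finalized = 31` can only be applied once the #15 and #20
// changes have been enacted; the change effective at #35 does not block it since
// 35 > 31. The blocking condition boils down to a predicate like the hypothetical
// one below (the real check additionally requires the standard change to be an
// ancestor of the forced change):
//
//     fn blocks_forced_change(standard_effective: u32, median_last_finalized: u32) -> bool {
//         standard_effective <= median_last_finalized
//     }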
+ pub(crate) fn apply_forced_changes( + &self, + best_hash: H, + best_number: N, + is_descendent_of: &F, + initial_sync: bool, + telemetry: Option, + ) -> Result, Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let mut new_set = None; + + for change in self + .pending_forced_changes + .iter() + .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far + .filter(|c| c.effective_number() == best_number) + { + // check if the given best block is in the same branch as + // the block that signaled the change. + if change.canon_hash == best_hash || is_descendent_of(&change.canon_hash, &best_hash)? { + let median_last_finalized = match change.delay_kind { + DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), + _ => unreachable!( + "pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed." + ), + }; + + // check if there's any pending standard change that we depend on + for (_, _, standard_change) in self.pending_standard_changes.roots() { + if standard_change.effective_number() <= median_last_finalized && + is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? + { + log::info!(target: LOG_TARGET, + "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", + change.canon_height, + standard_change.effective_number(), + ); + + return Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied( + standard_change.effective_number(), + )) + } + } + + // apply this change: make the set canonical + grandpa_log!( + initial_sync, + "👴 Applying authority set change forced at block #{:?}", + change.canon_height, + ); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.applying_forced_authority_set_change"; + "block" => ?change.canon_height + ); + + let mut authority_set_changes = self.authority_set_changes.clone(); + authority_set_changes.append(self.set_id, median_last_finalized.clone()); + + new_set = Some(( + median_last_finalized, + AuthoritySet { + current_authorities: change.next_authorities.clone(), + set_id: self.set_id + 1, + pending_standard_changes: ForkTree::new(), // new set, new changes. + pending_forced_changes: Vec::new(), + authority_set_changes, + }, + )); + + break + } + } + + // we don't wipe forced changes until another change is applied, hence + // why we return a new set instead of mutating. + Ok(new_set) + } + + /// Apply or prune any pending transitions based on a finality trigger. This + /// method ensures that if there are multiple changes in the same branch, + /// finalizing this block won't finalize past multiple transitions (i.e. + /// transitions must be finalized in-order). The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + /// + /// When the set has changed, the return value will be `Ok(Some((H, N)))` + /// which is the canonical block where the set last changed (i.e. the given + /// hash and number). 
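// Worked example (editorial, following the `disallow_multiple_changes_being_finalized_at_once`
// test further down): with two standard changes on one branch, signalled at #5
// (delay 10, effective at #15) and at #30 (delay 10, effective at #40), calling
// this function for block #40 straight away fails with
// `Error::ForkTree(fork_tree::Error::UnfinalizedAncestor)`; #15 must be finalized
// first (enacting set 1) and only then #40 (enacting set 2), so authority set
// transitions are always enacted in order.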
+ pub(crate) fn apply_standard_changes( + &mut self, + finalized_hash: H, + finalized_number: N, + is_descendent_of: &F, + initial_sync: bool, + telemetry: Option<&TelemetryHandle>, + ) -> Result, Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let mut status = Status { changed: false, new_set_block: None }; + + match self.pending_standard_changes.finalize_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() <= finalized_number, + )? { + fork_tree::FinalizationResult::Changed(change) => { + status.changed = true; + + let pending_forced_changes = std::mem::take(&mut self.pending_forced_changes); + + // we will keep all forced changes for any later blocks and that are a + // descendent of the finalized block (i.e. they are part of this branch). + for change in pending_forced_changes { + if change.effective_number() > finalized_number && + is_descendent_of(&finalized_hash, &change.canon_hash)? + { + self.pending_forced_changes.push(change) + } + } + + if let Some(change) = change { + grandpa_log!( + initial_sync, + "👴 Applying authority set change scheduled at block #{:?}", + change.canon_height, + ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.applying_scheduled_authority_set_change"; + "block" => ?change.canon_height + ); + + // Store the set_id together with the last block_number for the set + self.authority_set_changes.append(self.set_id, finalized_number.clone()); + + self.current_authorities = change.next_authorities; + self.set_id += 1; + + status.new_set_block = Some((finalized_hash, finalized_number)); + } + }, + fork_tree::FinalizationResult::Unchanged => {}, + } + + Ok(status) + } + + /// Check whether the given finalized block number enacts any standard + /// authority set change (without triggering it), ensuring that if there are + /// multiple changes in the same branch, finalizing this block won't + /// finalize past multiple transitions (i.e. transitions must be finalized + /// in-order). Returns `Some(true)` if the block being finalized enacts a + /// change that can be immediately applied, `Some(false)` if the block being + /// finalized enacts a change but it cannot be applied yet since there are + /// other dependent changes, and `None` if no change is enacted. The given + /// function `is_descendent_of` should return `true` if the second hash + /// (target) is a descendent of the first hash (base). + pub fn enacts_standard_change( + &self, + finalized_hash: H, + finalized_number: N, + is_descendent_of: &F, + ) -> Result, Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + self.pending_standard_changes + .finalizes_any_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() == finalized_number, + ) + .map_err(Error::ForkTree) + } +} + +/// Kinds of delays for pending changes. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub enum DelayKind { + /// Depth in finalized chain. + Finalized, + /// Depth in best chain. The median last finalized block is calculated at the time the + /// change was signaled. + Best { median_last_finalized: N }, +} + +/// A pending change to the authority set. +/// +/// This will be applied when the announcing block is at some depth within +/// the finalized or unfinalized chain. +#[derive(Debug, Clone, Encode, PartialEq)] +pub struct PendingChange { + /// The new authorities and weights to apply. 
+ pub(crate) next_authorities: AuthorityList, + /// How deep in the chain the announcing block must be + /// before the change is applied. + pub(crate) delay: N, + /// The announcing block's height. + pub(crate) canon_height: N, + /// The announcing block's hash. + pub(crate) canon_hash: H, + /// The delay kind. + pub(crate) delay_kind: DelayKind, +} + +impl Decode for PendingChange { + fn decode( + value: &mut I, + ) -> Result { + let next_authorities = Decode::decode(value)?; + let delay = Decode::decode(value)?; + let canon_height = Decode::decode(value)?; + let canon_hash = Decode::decode(value)?; + + let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); + + Ok(PendingChange { next_authorities, delay, canon_height, canon_hash, delay_kind }) + } +} + +impl + Clone> PendingChange { + /// Returns the effective number this change will be applied at. + pub fn effective_number(&self) -> N { + self.canon_height.clone() + self.delay.clone() + } +} + +/// Tracks historical authority set changes. We store the block numbers for the last block +/// of each authority set, once they have been finalized. These blocks are guaranteed to +/// have a justification unless they were triggered by a forced change. +#[derive(Debug, Encode, Decode, Clone, PartialEq)] +pub struct AuthoritySetChanges(Vec<(u64, N)>); + +/// The response when querying for a set id for a specific block. Either we get a set id +/// together with a block number for the last block in the set, or that the requested block is in +/// the latest set, or that we don't know what set id the given block belongs to. +#[derive(Debug, PartialEq)] +pub enum AuthoritySetChangeId { + /// The requested block is in the latest set. + Latest, + /// Tuple containing the set id and the last block number of that set. + Set(SetId, N), + /// We don't know which set id the request block belongs to (this can only happen due to + /// missing data). + Unknown, +} + +impl From> for AuthoritySetChanges { + fn from(changes: Vec<(u64, N)>) -> AuthoritySetChanges { + AuthoritySetChanges(changes) + } +} + +impl AuthoritySetChanges { + pub(crate) fn empty() -> Self { + Self(Default::default()) + } + + pub(crate) fn append(&mut self, set_id: u64, block_number: N) { + self.0.push((set_id, block_number)); + } + + pub(crate) fn get_set_id(&self, block_number: N) -> AuthoritySetChangeId { + if self + .0 + .last() + .map(|last_auth_change| last_auth_change.1 < block_number) + .unwrap_or(false) + { + return AuthoritySetChangeId::Latest + } + + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + .unwrap_or_else(|b| b); + + if idx < self.0.len() { + let (set_id, block_number) = self.0[idx].clone(); + + // if this is the first index but not the first set id then we are missing data. + if idx == 0 && set_id != 0 { + return AuthoritySetChangeId::Unknown + } + + AuthoritySetChangeId::Set(set_id, block_number) + } else { + AuthoritySetChangeId::Unknown + } + } + + pub(crate) fn insert(&mut self, block_number: N) { + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + .unwrap_or_else(|b| b); + + let set_id = if idx == 0 { 0 } else { self.0[idx - 1].0 + 1 }; + assert!(idx == self.0.len() || self.0[idx].0 != set_id); + self.0.insert(idx, (set_id, block_number)); + } + + /// Returns an iterator over all historical authority set changes starting at the given block + /// number (excluded). The iterator yields a tuple representing the set id and the block number + /// of the last block in that set. 
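// Usage sketch (editorial, condensed from the `authority_set_changes_*` and
// `iter_from_works` tests further down):
//
//     let mut changes = AuthoritySetChanges::empty();
//     changes.append(0, 41); // set 0 ended at block #41
//     changes.append(1, 81); // set 1 ended at block #81
//
//     assert_eq!(changes.get_set_id(20), AuthoritySetChangeId::Set(0, 41));
//     assert_eq!(changes.get_set_id(42), AuthoritySetChangeId::Set(1, 81));
//     assert_eq!(changes.get_set_id(100), AuthoritySetChangeId::Latest);
//
//     // all sets that ended strictly after block #41
//     assert_eq!(
//         changes.iter_from(41).map(|it| it.cloned().collect::<Vec<_>>()),
//         Some(vec![(1, 81)]),
//     );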
+ pub fn iter_from(&self, block_number: N) -> Option> { + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + // if there was a change at the given block number then we should start on the next + // index since we want to exclude the current block number + .map(|n| n + 1) + .unwrap_or_else(|b| b); + + if idx < self.0.len() { + let (set_id, _) = self.0[idx].clone(); + + // if this is the first index but not the first set id then we are missing data. + if idx == 0 && set_id != 0 { + return None + } + } + + Some(self.0[idx..].iter()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::crypto::{ByteArray, UncheckedFrom}; + + fn static_is_descendent_of(value: bool) -> impl Fn(&A, &A) -> Result { + move |_, _| Ok(value) + } + + fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result + where + F: Fn(&A, &A) -> bool, + { + move |base, hash| Ok(f(base, hash)) + } + + #[test] + fn current_limit_filters_min() { + let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: current_authorities.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let change = |height| PendingChange { + next_authorities: current_authorities.clone(), + delay: 0, + canon_height: height, + canon_hash: height.to_string(), + delay_kind: DelayKind::Finalized, + }; + + let is_descendent_of = static_is_descendent_of(false); + + authorities.add_pending_change(change(1), &is_descendent_of).unwrap(); + authorities.add_pending_change(change(2), &is_descendent_of).unwrap(); + + assert_eq!(authorities.current_limit(0), Some(1)); + + assert_eq!(authorities.current_limit(1), Some(1)); + + assert_eq!(authorities.current_limit(2), Some(2)); + + assert_eq!(authorities.current_limit(3), None); + } + + #[test] + fn changes_iterated_in_pre_order() { + let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: current_authorities.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let change_a = PendingChange { + next_authorities: current_authorities.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: current_authorities.clone(), + delay: 0, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + let change_c = PendingChange { + next_authorities: current_authorities.clone(), + delay: 5, + canon_height: 10, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change( + change_c.clone(), + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); + + // forced changes are iterated last + let change_d = PendingChange { + next_authorities: current_authorities.clone(), + delay: 2, + canon_height: 1, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + let change_e = PendingChange { + 
next_authorities: current_authorities.clone(), + delay: 2, + canon_height: 0, + canon_hash: "hash_e", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + authorities + .add_pending_change(change_d.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_e.clone(), &static_is_descendent_of(false)) + .unwrap(); + + // ordered by subtree depth + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_a, &change_c, &change_b, &change_e, &change_d], + ); + } + + #[test] + fn apply_change() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 5)]; + let set_b = vec![(AuthorityId::from_slice(&[2; 32]).unwrap(), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a, &change_b]); + + // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out + // "hash_b" + let status = authorities + .apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, None); + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a]); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); + + // finalizing "hash_d" will enact the change signaled at "hash_a" + let status = authorities + .apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.pending_changes().count(), 0); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + } + + #[test] + fn disallow_multiple_changes_being_finalized_at_once() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 5)]; + let set_c = vec![(AuthorityId::from_slice(&[2; 32]).unwrap(), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_c = PendingChange { + 
next_authorities: set_c.clone(), + delay: 10, + canon_height: 30, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_b") => true, + ("hash_a", "hash_c") => true, + ("hash_a", "hash_d") => true, + + ("hash_c", "hash_b") => false, + ("hash_c", "hash_d") => true, + + ("hash_b", "hash_c") => true, + _ => unreachable!(), + }); + + // trying to finalize past `change_c` without finalizing `change_a` first + assert!(matches!( + authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false, None), + Err(Error::ForkTree(fork_tree::Error::UnfinalizedAncestor)) + )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); + + let status = authorities + .apply_standard_changes("hash_b", 15, &is_descendent_of, false, None) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_b", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + + // after finalizing `change_a` it should be possible to finalize `change_c` + let status = authorities + .apply_standard_changes("hash_d", 40, &is_descendent_of, false, None) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 40))); + + assert_eq!(authorities.current_authorities, set_c); + assert_eq!(authorities.set_id, 2); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 40)])); + } + + #[test] + fn enacts_standard_change_works() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 20, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + ("hash_a", "hash_e") => true, + ("hash_b", "hash_d") => true, + ("hash_b", "hash_e") => true, + ("hash_a", "hash_c") => false, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }); + + // "hash_c" won't finalize the existing change since it isn't a descendent + assert_eq!( + authorities.enacts_standard_change("hash_c", 15, &is_descendent_of).unwrap(), + None, + ); + + // "hash_d" at depth 14 won't work either + assert_eq!( + authorities.enacts_standard_change("hash_d", 14, &is_descendent_of).unwrap(), + None, + ); + + // but it should work at depth 15 (change height + depth) + assert_eq!( + authorities.enacts_standard_change("hash_d", 15, &is_descendent_of).unwrap(), + Some(true), + ); + + // finalizing "hash_e" at depth 
20 will trigger change at "hash_b", but + // it can't be applied yet since "hash_a" must be applied first + assert_eq!( + authorities.enacts_standard_change("hash_e", 30, &is_descendent_of).unwrap(), + Some(false), + ); + } + + #[test] + fn forced_changes() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 5)]; + let set_b = vec![(AuthorityId::from_slice(&[2; 32]).unwrap(), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { median_last_finalized: 42 }, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + + // no duplicates are allowed + assert!(matches!( + authorities.add_pending_change(change_b, &static_is_descendent_of(false)), + Err(Error::DuplicateAuthoritySetChange) + )); + + // there's an effective change triggered at block 15 but not a standard one. + // so this should do nothing. + assert_eq!( + authorities + .enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)) + .unwrap(), + None, + ); + + // there can only be one pending forced change per fork + let change_c = PendingChange { + next_authorities: set_b.clone(), + delay: 3, + canon_height: 8, + canon_hash: "hash_a8", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + let is_descendent_of_a = is_descendent_of(|base: &&str, _| base.starts_with("hash_a")); + + assert!(matches!( + authorities.add_pending_change(change_c, &is_descendent_of_a), + Err(Error::MultiplePendingForcedAuthoritySetChanges) + )); + + // let's try and apply the forced changes. + // too early and there's no forced changes to apply. + assert!(authorities + .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false, None) + .unwrap() + .is_none()); + + // too late. + assert!(authorities + .apply_forced_changes("hash_a16", 16, &is_descendent_of_a, false, None) + .unwrap() + .is_none()); + + // on time -- chooses the right change for this fork. 
+ assert_eq!( + authorities + .apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false, None) + .unwrap() + .unwrap(), + ( + 42, + AuthoritySet { + current_authorities: set_a, + set_id: 1, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 42)]), + }, + ) + ); + } + + #[test] + fn forced_changes_with_no_delay() { + // NOTE: this is a regression test + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 5)]; + + // we create a forced change with no delay + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 0, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + // and import it + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + + // it should be enacted at the same block that signaled it + assert!(authorities + .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false, None) + .unwrap() + .is_some()); + } + + #[test] + fn forced_changes_blocked_by_standard_changes() { + let set_a = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: set_a.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + // effective at #15 + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 10, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + // effective #20 + let change_b = PendingChange { + next_authorities: set_a.clone(), + delay: 0, + canon_height: 20, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + // effective at #35 + let change_c = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 30, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + // add some pending standard changes all on the same fork + authorities + .add_pending_change(change_a, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c, &static_is_descendent_of(true)) + .unwrap(); + + // effective at #45 + let change_d = PendingChange { + next_authorities: set_a.clone(), + delay: 5, + canon_height: 40, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { median_last_finalized: 31 }, + }; + + // now add a forced change on the same fork + authorities + .add_pending_change(change_d, &static_is_descendent_of(true)) + .unwrap(); + + // the forced change cannot be applied since the pending changes it depends on + // have not been applied yet. 
+ assert!(matches!( + authorities.apply_forced_changes( + "hash_d45", + 45, + &static_is_descendent_of(true), + false, + None + ), + Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(15)) + )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); + + // we apply the first pending standard change at #15 + authorities + .apply_standard_changes("hash_a15", 15, &static_is_descendent_of(true), false, None) + .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + + // but the forced change still depends on the next standard change + assert!(matches!( + authorities.apply_forced_changes( + "hash_d", + 45, + &static_is_descendent_of(true), + false, + None + ), + Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(20)) + )); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); + + // we apply the pending standard change at #20 + authorities + .apply_standard_changes("hash_b", 20, &static_is_descendent_of(true), false, None) + .unwrap(); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); + + // afterwards the forced change at #45 can already be applied since it signals + // that finality stalled at #31, and the next pending standard change is effective + // at #35. subsequent forced changes on the same branch must be kept + assert_eq!( + authorities + .apply_forced_changes("hash_d", 45, &static_is_descendent_of(true), false, None) + .unwrap() + .unwrap(), + ( + 31, + AuthoritySet { + current_authorities: set_a.clone(), + set_id: 3, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges(vec![(0, 15), (1, 20), (2, 31)]), + } + ), + ); + assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15), (1, 20)])); + } + + #[test] + fn next_change_works() { + let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: current_authorities.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let new_set = current_authorities.clone(); + + // We have three pending changes with 2 possible roots that are enacted + // immediately on finality (i.e. standard changes). 
+ let change_a0 = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height: 5, + canon_hash: "hash_a0", + delay_kind: DelayKind::Finalized, + }; + + let change_a1 = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height: 10, + canon_hash: "hash_a1", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height: 4, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + // A0 (#5) <- A10 (#8) <- A1 (#10) <- best_a + // B (#4) <- best_b + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a0", "hash_a1") => true, + ("hash_a0", "best_a") => true, + ("hash_a1", "best_a") => true, + ("hash_a10", "best_a") => true, + ("hash_b", "best_b") => true, + _ => false, + }); + + // add the three pending changes + authorities.add_pending_change(change_b, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a0, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a1, &is_descendent_of).unwrap(); + + // the earliest change at block `best_a` should be the change at A0 (#5) + assert_eq!( + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), + Some(("hash_a0", 5)), + ); + + // the earliest change at block `best_b` should be the change at B (#4) + assert_eq!( + authorities.next_change(&"best_b", &is_descendent_of).unwrap(), + Some(("hash_b", 4)), + ); + + // we apply the change at A0 which should prune it and the fork at B + authorities + .apply_standard_changes("hash_a0", 5, &is_descendent_of, false, None) + .unwrap(); + + // the next change is now at A1 (#10) + assert_eq!( + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), + Some(("hash_a1", 10)), + ); + + // there's no longer any pending change at `best_b` fork + assert_eq!(authorities.next_change(&"best_b", &is_descendent_of).unwrap(), None); + + // we a forced change at A10 (#8) + let change_a10 = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height: 8, + canon_hash: "hash_a10", + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + authorities + .add_pending_change(change_a10, &static_is_descendent_of(false)) + .unwrap(); + + // it should take precedence over the change at A1 (#10) + assert_eq!( + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), + Some(("hash_a10", 8)), + ); + } + + #[test] + fn maintains_authority_list_invariants() { + // empty authority lists are invalid + assert_eq!(AuthoritySet::<(), ()>::genesis(vec![]), None); + assert_eq!( + AuthoritySet::<(), ()>::new( + vec![], + 0, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ), + None, + ); + + let invalid_authorities_weight = vec![ + (AuthorityId::from_slice(&[1; 32]).unwrap(), 5), + (AuthorityId::from_slice(&[2; 32]).unwrap(), 0), + ]; + + // authority weight of zero is invalid + assert_eq!(AuthoritySet::<(), ()>::genesis(invalid_authorities_weight.clone()), None); + assert_eq!( + AuthoritySet::<(), ()>::new( + invalid_authorities_weight.clone(), + 0, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ), + None, + ); + + let mut authority_set = + AuthoritySet::<(), u64>::genesis(vec![(AuthorityId::unchecked_from([1; 32]), 5)]) + .unwrap(); + + let invalid_change_empty_authorities = PendingChange { + next_authorities: vec![], + delay: 10, + canon_height: 5, + canon_hash: (), + delay_kind: DelayKind::Finalized, + }; + + // pending change contains an empty authority set + 
assert!(matches!( + authority_set.add_pending_change( + invalid_change_empty_authorities.clone(), + &static_is_descendent_of(false) + ), + Err(Error::InvalidAuthoritySet) + )); + + let invalid_change_authorities_weight = PendingChange { + next_authorities: invalid_authorities_weight, + delay: 10, + canon_height: 5, + canon_hash: (), + delay_kind: DelayKind::Best { median_last_finalized: 0 }, + }; + + // pending change contains an an authority set + // where one authority has weight of 0 + assert!(matches!( + authority_set.add_pending_change( + invalid_change_authorities_weight, + &static_is_descendent_of(false) + ), + Err(Error::InvalidAuthoritySet) + )); + } + + #[test] + fn cleans_up_stale_forced_changes_when_applying_standard_change() { + let current_authorities = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + + let mut authorities = AuthoritySet { + current_authorities: current_authorities.clone(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + authority_set_changes: AuthoritySetChanges::empty(), + }; + + let new_set = current_authorities.clone(); + + // Create the following pending changes tree: + // + // [#C3] + // / + // /- (#C2) + // / + // (#A) - (#B) - [#C1] + // \ + // (#C0) - [#D] + // + // () - Standard change + // [] - Forced change + + let is_descendent_of = { + let hashes = vec!["B", "C0", "C1", "C2", "C3", "D"]; + is_descendent_of(move |base, hash| match (*base, *hash) { + ("B", "B") => false, // required to have the simpler case below + ("A", b) | ("B", b) => hashes.iter().any(|h| *h == b), + ("C0", "D") => true, + _ => false, + }) + }; + + let mut add_pending_change = |canon_height, canon_hash, forced| { + let change = PendingChange { + next_authorities: new_set.clone(), + delay: 0, + canon_height, + canon_hash, + delay_kind: if forced { + DelayKind::Best { median_last_finalized: 0 } + } else { + DelayKind::Finalized + }, + }; + + authorities.add_pending_change(change, &is_descendent_of).unwrap(); + }; + + add_pending_change(5, "A", false); + add_pending_change(10, "B", false); + add_pending_change(15, "C0", false); + add_pending_change(15, "C1", true); + add_pending_change(15, "C2", false); + add_pending_change(15, "C3", true); + add_pending_change(20, "D", true); + + // applying the standard change at A should not prune anything + // other then the change that was applied + authorities + .apply_standard_changes("A", 5, &is_descendent_of, false, None) + .unwrap(); + + assert_eq!(authorities.pending_changes().count(), 6); + + // same for B + authorities + .apply_standard_changes("B", 10, &is_descendent_of, false, None) + .unwrap(); + + assert_eq!(authorities.pending_changes().count(), 5); + + let authorities2 = authorities.clone(); + + // finalizing C2 should clear all forced changes + authorities + .apply_standard_changes("C2", 15, &is_descendent_of, false, None) + .unwrap(); + + assert_eq!(authorities.pending_forced_changes.len(), 0); + + // finalizing C0 should clear all forced changes but D + let mut authorities = authorities2; + authorities + .apply_standard_changes("C0", 15, &is_descendent_of, false, None) + .unwrap(); + + assert_eq!(authorities.pending_forced_changes.len(), 1); + assert_eq!(authorities.pending_forced_changes.first().unwrap().canon_hash, "D"); + } + + #[test] + fn authority_set_changes_insert() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 41); + authority_set_changes.append(1, 81); + authority_set_changes.append(4, 121); + + 
authority_set_changes.insert(101); + assert_eq!(authority_set_changes.get_set_id(100), AuthoritySetChangeId::Set(2, 101)); + assert_eq!(authority_set_changes.get_set_id(101), AuthoritySetChangeId::Set(2, 101)); + } + + #[test] + fn authority_set_changes_for_complete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 41); + authority_set_changes.append(1, 81); + authority_set_changes.append(2, 121); + + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(1, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); + } + + #[test] + fn authority_set_changes_for_incomplete_data() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(2, 41); + authority_set_changes.append(3, 81); + authority_set_changes.append(4, 121); + + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(3, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); + } + + #[test] + fn iter_from_works() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 41); + authority_set_changes.append(2, 81); + + // we are missing the data for the first set, therefore we should return `None` + assert_eq!(None, authority_set_changes.iter_from(40).map(|it| it.collect::>())); + + // after adding the data for the first set the same query should work + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 21); + authority_set_changes.append(1, 41); + authority_set_changes.append(2, 81); + authority_set_changes.append(3, 121); + + assert_eq!( + Some(vec![(1, 41), (2, 81), (3, 121)]), + authority_set_changes.iter_from(40).map(|it| it.cloned().collect::>()), + ); + + assert_eq!( + Some(vec![(2, 81), (3, 121)]), + authority_set_changes.iter_from(41).map(|it| it.cloned().collect::>()), + ); + + assert_eq!(0, authority_set_changes.iter_from(121).unwrap().count()); + + assert_eq!(0, authority_set_changes.iter_from(200).unwrap().count()); + } +} diff --git a/substrate/client/consensus/grandpa/src/aux_schema.rs b/substrate/client/consensus/grandpa/src/aux_schema.rs new file mode 100644 index 00000000..97a8bc66 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/aux_schema.rs @@ -0,0 +1,789 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Schema for stuff in the aux-db. + +use std::fmt::Debug; + +use finality_grandpa::round::State as RoundState; +use log::{info, warn}; +use parity_scale_codec::{Decode, Encode}; + +use fork_tree::ForkTree; +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_consensus_grandpa::{AuthorityList, RoundNumber, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use crate::{ + authorities::{ + AuthoritySet, AuthoritySetChanges, DelayKind, PendingChange, SharedAuthoritySet, + }, + environment::{ + CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, + VoterSetState, + }, + GrandpaJustification, NewAuthoritySet, LOG_TARGET, +}; + +const VERSION_KEY: &[u8] = b"grandpa_schema_version"; +const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; +const CONCLUDED_ROUNDS: &[u8] = b"grandpa_concluded_rounds"; +const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; +const BEST_JUSTIFICATION: &[u8] = b"grandpa_best_justification"; + +const CURRENT_VERSION: u32 = 3; + +/// The voter set state. +#[derive(Debug, Clone, Encode, Decode)] +#[cfg_attr(test, derive(PartialEq))] +pub enum V1VoterSetState { + /// The voter set state, currently paused. + Paused(RoundNumber, RoundState), + /// The voter set state, currently live. + Live(RoundNumber, RoundState), +} + +type V0VoterSetState = (RoundNumber, RoundState); + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V0PendingChange { + next_authorities: AuthorityList, + delay: N, + canon_height: N, + canon_hash: H, +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V0AuthoritySet { + current_authorities: AuthorityList, + set_id: SetId, + pending_changes: Vec>, +} + +impl Into> for V0AuthoritySet +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, +{ + fn into(self) -> AuthoritySet { + let mut pending_standard_changes = ForkTree::new(); + + for old_change in self.pending_changes { + let new_change = PendingChange { + next_authorities: old_change.next_authorities, + delay: old_change.delay, + canon_height: old_change.canon_height, + canon_hash: old_change.canon_hash, + delay_kind: DelayKind::Finalized, + }; + + if let Err(err) = pending_standard_changes.import::<_, ClientError>( + new_change.canon_hash.clone(), + new_change.canon_height.clone(), + new_change, + // previously we only supported at most one pending change per fork + &|_, _| Ok(false), + ) { + warn!(target: LOG_TARGET, "Error migrating pending authority set change: {}", err); + warn!(target: LOG_TARGET, "Node is in a potentially inconsistent state."); + } + } + + let authority_set = AuthoritySet::new( + self.current_authorities, + self.set_id, + pending_standard_changes, + Vec::new(), + AuthoritySetChanges::empty(), + ); + + authority_set.expect("current_authorities is non-empty and weights are non-zero; qed.") + } +} + +impl Into> for V2AuthoritySet +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, +{ + fn into(self) -> AuthoritySet { + AuthoritySet::new( + self.current_authorities, + self.set_id, + self.pending_standard_changes, + self.pending_forced_changes, + AuthoritySetChanges::empty(), + ) + .expect("current_authorities is non-empty and weights are non-zero; qed.") + } +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +struct V2AuthoritySet { + current_authorities: AuthorityList, + set_id: u64, + pending_standard_changes: 
ForkTree>, + pending_forced_changes: Vec>, +} + +pub(crate) fn load_decode( + backend: &B, + key: &[u8], +) -> ClientResult> { + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .map_err(|e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e))) + .map(Some), + } +} + +/// Persistent data kept between runs. +pub(crate) struct PersistentData { + pub(crate) authority_set: SharedAuthoritySet>, + pub(crate) set_state: SharedVoterSetState, +} + +fn migrate_from_version0( + backend: &B, + genesis_round: &G, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, +{ + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(old_set) = + load_decode::<_, V0AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { + let new_set: AuthoritySet> = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let (last_round_number, last_round_state) = match load_decode::< + _, + V0VoterSetState>, + >(backend, SET_STATE_KEY)? + { + Some((number, state)) => (number, state), + None => (0, genesis_round()), + }; + + let set_id = new_set.set_id; + + let base = last_round_state.prevote_ghost.expect( + "state is for completed round; completed rounds must have a prevote ghost; qed.", + ); + + let mut current_rounds = CurrentRounds::::new(); + current_rounds.insert(last_round_number + 1, HasVoted::No); + + let set_state = VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: last_round_number, + state: last_round_state, + votes: Vec::new(), + base, + }, + set_id, + &new_set, + ), + current_rounds, + }; + + backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; + + return Ok(Some((new_set, set_state))) + } + + Ok(None) +} + +fn migrate_from_version1( + backend: &B, + genesis_round: &G, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, +{ + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(set) = + load_decode::<_, AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { + let set_id = set.set_id; + + let completed_rounds = |number, state, base| { + CompletedRounds::new( + CompletedRound { number, state, votes: Vec::new(), base }, + set_id, + &set, + ) + }; + + let set_state = match load_decode::<_, V1VoterSetState>>( + backend, + SET_STATE_KEY, + )? 
{ + Some(V1VoterSetState::Paused(last_round_number, set_state)) => { + let base = set_state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::Paused { + completed_rounds: completed_rounds(last_round_number, set_state, base), + } + }, + Some(V1VoterSetState::Live(last_round_number, set_state)) => { + let base = set_state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + let mut current_rounds = CurrentRounds::::new(); + current_rounds.insert(last_round_number + 1, HasVoted::No); + + VoterSetState::Live { + completed_rounds: completed_rounds(last_round_number, set_state, base), + current_rounds, + } + }, + None => { + let set_state = genesis_round(); + let base = set_state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::live(set_id, &set, base) + }, + }; + + backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; + + return Ok(Some((set, set_state))) + } + + Ok(None) +} + +fn migrate_from_version2( + backend: &B, + genesis_round: &G, +) -> ClientResult>, VoterSetState)>> +where + B: AuxStore, + G: Fn() -> RoundState>, +{ + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(old_set) = + load_decode::<_, V2AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { + let new_set: AuthoritySet> = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => { + let state = genesis_round(); + let base = state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::live(new_set.set_id, &new_set, base) + }, + }; + + return Ok(Some((new_set, set_state))) + } + + Ok(None) +} + +/// Load or initialize persistent data from backend. +pub(crate) fn load_persistent( + backend: &B, + genesis_hash: Block::Hash, + genesis_number: NumberFor, + genesis_authorities: G, +) -> ClientResult> +where + B: AuxStore, + G: FnOnce() -> ClientResult, +{ + let version: Option = load_decode(backend, VERSION_KEY)?; + + let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); + + match version { + None => { + if let Some((new_set, set_state)) = + migrate_from_version0::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + set_state: set_state.into(), + }) + } + }, + Some(1) => { + if let Some((new_set, set_state)) = + migrate_from_version1::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + set_state: set_state.into(), + }) + } + }, + Some(2) => { + if let Some((new_set, set_state)) = + migrate_from_version2::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + set_state: set_state.into(), + }) + } + }, + Some(3) => { + if let Some(set) = load_decode::<_, AuthoritySet>>( + backend, + AUTHORITY_SET_KEY, + )? { + let set_state = + match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? 
{ + Some(state) => state, + None => { + let state = make_genesis_round(); + let base = state.prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + VoterSetState::live(set.set_id, &set, base) + }, + }; + + return Ok(PersistentData { authority_set: set.into(), set_state: set_state.into() }) + } + }, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))), + } + + // genesis. + info!( + target: LOG_TARGET, + "👴 Loading GRANDPA authority set \ + from genesis on what appears to be first startup." + ); + + let genesis_authorities = genesis_authorities()?; + let genesis_set = AuthoritySet::genesis(genesis_authorities) + .expect("genesis authorities is non-empty; all weights are non-zero; qed."); + let state = make_genesis_round(); + let base = state + .prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + let genesis_state = VoterSetState::live(0, &genesis_set, base); + + backend.insert_aux( + &[ + (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), + (SET_STATE_KEY, genesis_state.encode().as_slice()), + ], + &[], + )?; + + Ok(PersistentData { authority_set: genesis_set.into(), set_state: genesis_state.into() }) +} + +/// Update the authority set on disk after a change. +/// +/// If there has just been a handoff, pass a `new_set` parameter that describes the +/// handoff. `set` in all cases should reflect the current authority set, with all +/// changes and handoffs applied. +pub(crate) fn update_authority_set( + set: &AuthoritySet>, + new_set: Option<&NewAuthoritySet>>, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + // write new authority set state to disk. + let encoded_set = set.encode(); + + if let Some(new_set) = new_set { + // we also overwrite the "last completed round" entry with a blank slate + // because from the perspective of the finality gadget, the chain has + // reset. + let set_state = VoterSetState::::live( + new_set.set_id, + set, + (new_set.canon_hash, new_set.canon_number), + ); + let encoded = set_state.encode(); + + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..]), (SET_STATE_KEY, &encoded[..])]) + } else { + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) + } +} + +/// Update the justification for the latest finalized block on-disk. +/// +/// We always keep around the justification for the best finalized block and overwrite it +/// as we finalize new blocks, this makes sure that we don't store useless justifications +/// but can always prove finality of the latest block. +pub(crate) fn update_best_justification( + justification: &GrandpaJustification, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + let encoded_justification = justification.encode(); + write_aux(&[(BEST_JUSTIFICATION, &encoded_justification[..])]) +} + +/// Fetch the justification for the latest block finalized by GRANDPA, if any. +pub fn best_justification( + backend: &B, +) -> ClientResult>> +where + B: AuxStore, + Block: BlockT, +{ + load_decode::<_, GrandpaJustification>(backend, BEST_JUSTIFICATION) +} + +/// Write voter set state. +pub(crate) fn write_voter_set_state( + backend: &B, + state: &VoterSetState, +) -> ClientResult<()> { + backend.insert_aux(&[(SET_STATE_KEY, state.encode().as_slice())], &[]) +} + +/// Write concluded round. 
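+///
+/// Each concluded round is stored under its own key: the `CONCLUDED_ROUNDS`
+/// prefix followed by the SCALE-encoded round number, so individual rounds
+/// can be loaded back later (see `write_read_concluded_rounds` below).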
+pub(crate) fn write_concluded_round( + backend: &B, + round_data: &CompletedRound, +) -> ClientResult<()> { + let mut key = CONCLUDED_ROUNDS.to_vec(); + let round_number = round_data.number; + round_number.using_encoded(|n| key.extend(n)); + + backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) +} + +#[cfg(test)] +pub(crate) fn load_authorities( + backend: &B, +) -> Option> { + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY).expect("backend error") +} + +#[cfg(test)] +mod test { + use super::*; + use sp_consensus_grandpa::AuthorityId; + use sp_core::{crypto::UncheckedFrom, H256}; + use substrate_test_runtime_client::{self, runtime::Block}; + + fn dummy_id() -> AuthorityId { + AuthorityId::unchecked_from([1; 32]) + } + + #[test] + fn load_decode_from_v0_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(dummy_id(), 100)]; + let set_id = 3; + let round_number: RoundNumber = 42; + let round_state = RoundState:: { + prevote_ghost: Some((H256::random(), 32)), + finalized: None, + estimate: None, + completable: false, + }; + + { + let authority_set = V0AuthoritySet:: { + current_authorities: authorities.clone(), + pending_changes: Vec::new(), + set_id, + }; + + let voter_set_state = (round_number, round_state.clone()); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), None); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); + + let PersistentData { authority_set, set_state, .. 
} = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + *authority_set.inner(), + AuthoritySet::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .unwrap(), + ); + + let mut current_rounds = CurrentRounds::::new(); + current_rounds.insert(round_number + 1, HasVoted::No); + + assert_eq!( + &*set_state.read(), + &VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: round_number, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }, + set_id, + &*authority_set.inner(), + ), + current_rounds, + }, + ); + } + + #[test] + fn load_decode_from_v1_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(dummy_id(), 100)]; + let set_id = 3; + let round_number: RoundNumber = 42; + let round_state = RoundState:: { + prevote_ghost: Some((H256::random(), 32)), + finalized: None, + estimate: None, + completable: false, + }; + + { + let authority_set = AuthoritySet::::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .unwrap(); + + let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone()); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 1u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(1)); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); + + let PersistentData { authority_set, set_state, .. } = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + *authority_set.inner(), + AuthoritySet::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .unwrap(), + ); + + let mut current_rounds = CurrentRounds::::new(); + current_rounds.insert(round_number + 1, HasVoted::No); + + assert_eq!( + &*set_state.read(), + &VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: round_number, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }, + set_id, + &*authority_set.inner(), + ), + current_rounds, + }, + ); + } + + #[test] + fn load_decode_from_v2_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(dummy_id(), 100)]; + let set_id = 3; + + { + let authority_set = V2AuthoritySet:: { + current_authorities: authorities.clone(), + set_id, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let genesis_state = (H256::random(), 32); + let voter_set_state: VoterSetState = + VoterSetState::live( + set_id, + &authority_set.clone().into(), // Note the conversion! 
+ genesis_state, + ); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 2u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(2)); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3)); + + let PersistentData { authority_set, .. } = load_persistent::< + substrate_test_runtime_client::runtime::Block, + _, + _, + >(&client, H256::random(), 0, || unreachable!()) + .unwrap(); + + assert_eq!( + *authority_set.inner(), + AuthoritySet::new( + authorities.clone(), + set_id, + ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .unwrap(), + ); + } + + #[test] + fn write_read_concluded_rounds() { + let client = substrate_test_runtime_client::new(); + let hash = H256::random(); + let round_state = RoundState::genesis((hash, 0)); + + let completed_round = CompletedRound:: { + number: 42, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }; + + assert!(write_concluded_round(&client, &completed_round).is_ok()); + + let round_number = completed_round.number; + let mut key = CONCLUDED_ROUNDS.to_vec(); + round_number.using_encoded(|n| key.extend(n)); + + assert_eq!( + load_decode::<_, CompletedRound::>( + &client, &key + ) + .unwrap(), + Some(completed_round), + ); + } +} diff --git a/substrate/client/consensus/grandpa/src/communication/gossip.rs b/substrate/client/consensus/grandpa/src/communication/gossip.rs new file mode 100644 index 00000000..c4c885d4 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/communication/gossip.rs @@ -0,0 +1,2659 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Gossip and politeness for polite-grandpa. +//! +//! This module implements the following message types: +//! #### Neighbor Packet +//! +//! The neighbor packet is sent to only our neighbors. It contains this information +//! +//! - Current Round +//! - Current voter set ID +//! - Last finalized hash from commit messages. +//! +//! If a peer is at a given voter set, it is impolite to send messages from +//! an earlier voter set. It is extremely impolite to send messages +//! from a future voter set. "future-set" messages can be dropped and ignored. +//! +//! If a peer is at round r, is impolite to send messages about r-2 or earlier and extremely +//! impolite to send messages about r+1 or later. "future-round" messages can +//! be dropped and ignored. +//! +//! It is impolite to send a neighbor packet which moves backwards or does not progress +//! protocol state. +//! +//! 
This is beneficial if it conveys some progress in the protocol state of the peer. +//! +//! #### Prevote / Precommit +//! +//! These are votes within a round. Noting that we receive these messages +//! from our peers who are not necessarily voters, we have to account the benefit +//! based on what they might have seen. +//! +//! #### Propose +//! +//! This is a broadcast by a known voter of the last-round estimate. +//! +//! #### Commit +//! +//! These are used to announce past agreement of finality. +//! +//! It is impolite to send commits which are earlier than the last commit +//! sent. It is especially impolite to send commits which are invalid, or from +//! a different Set ID than the receiving peer has indicated. +//! +//! Sending a commit is polite when it may finalize something that the receiving peer +//! was not aware of. +//! +//! #### Catch Up +//! +//! These allow a peer to request another peer, which they perceive to be in a +//! later round, to provide all the votes necessary to complete a given round +//! `R`. +//! +//! It is impolite to send a catch up request for a round `R` to a peer whose +//! announced view is behind `R`. It is also impolite to send a catch up request +//! to a peer in a new different Set ID. +//! +//! The logic for issuing and tracking pending catch up requests is implemented +//! in the `GossipValidator`. A catch up request is issued anytime we see a +//! neighbor packet from a peer at a round `CATCH_UP_THRESHOLD` higher than at +//! we are. +//! +//! ## Expiration +//! +//! We keep some amount of recent rounds' messages, but do not accept new ones from rounds +//! older than our current_round - 1. +//! +//! ## Message Validation +//! +//! We only send polite messages to peers, + +use ahash::{AHashMap, AHashSet}; +use log::{debug, trace}; +use parity_scale_codec::{Decode, Encode}; +use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; +use rand::seq::SliceRandom; +use sc_network::{PeerId, ReputationChange}; +use sc_network_common::role::ObservedRole; +use sc_network_gossip::{MessageIntent, ValidatorContext}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_consensus_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; + +use super::{benefit, cost, Round, SetId, NEIGHBOR_REBROADCAST_PERIOD}; +use crate::{environment, CatchUp, CompactCommit, SignedMessage, LOG_TARGET}; + +use std::{ + collections::{HashSet, VecDeque}, + time::{Duration, Instant}, +}; + +const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); +const CATCH_UP_REQUEST_TIMEOUT: Duration = Duration::from_secs(45); +const CATCH_UP_PROCESS_TIMEOUT: Duration = Duration::from_secs(30); +/// Maximum number of rounds we are behind a peer before issuing a +/// catch up request. +const CATCH_UP_THRESHOLD: u64 = 2; + +/// The total round duration measured in periods of gossip duration: +/// 2 gossip durations for prevote timer +/// 2 gossip durations for precommit timer +/// 1 gossip duration for precommits to spread +const ROUND_DURATION: u32 = 5; + +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to more nodes than just the lucky ones. +const PROPAGATION_SOME: f32 = 1.5; + +/// The period, measured in rounds, since the latest round start, after which we will start +/// propagating gossip messages to all the nodes we are connected to. 
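+///
+/// For example, with a 1 second gossip duration a round is treated as lasting
+/// `ROUND_DURATION` (5) gossip durations, so messages start being propagated
+/// to all connected peers roughly 15 seconds after the local round started.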
+const PROPAGATION_ALL: f32 = 3.0; + +/// Assuming a network of 3000 nodes, using a fanout of 4, after about 6 iterations +/// of gossip a message has very likely reached all nodes on the network (`log4(3000)`). +const LUCKY_PEERS: usize = 4; + +type Report = (PeerId, ReputationChange); + +/// An outcome of examining a message. +#[derive(Debug, PartialEq, Clone, Copy)] +enum Consider { + /// Accept the message. + Accept, + /// Message is too early. Reject. + RejectPast, + /// Message is from the future. Reject. + RejectFuture, + /// Message cannot be evaluated. Reject. + RejectOutOfScope, +} + +/// A view of protocol state. +#[derive(Debug)] +struct View { + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. + last_commit: Option, // commit-finalized block height, if any. + last_update: Option, // last time we heard from peer, used for spamming detection. +} + +impl Default for View { + fn default() -> Self { + View { round: Round(1), set_id: SetId(0), last_commit: None, last_update: None } + } +} + +impl View { + /// Consider a round and set ID combination under a current view. + fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { + // only from current set + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } + + // only r-1 ... r+1 + if round.0 > self.round.0.saturating_add(1) { + return Consider::RejectFuture + } + if round.0 < self.round.0.saturating_sub(1) { + return Consider::RejectPast + } + + Consider::Accept + } + + /// Consider a set-id global message. Rounds are not taken into account, but are implicitly + /// because we gate on finalization of a further block than a previous commit. + fn consider_global(&self, set_id: SetId, number: N) -> Consider { + // only from current set + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } + + // only commits which claim to prove a higher block number than + // the one we're aware of. + match self.last_commit { + None => Consider::Accept, + Some(ref num) => + if num < &number { + Consider::Accept + } else { + Consider::RejectPast + }, + } + } +} + +/// A local view of protocol state. Similar to `View` but we additionally track +/// the round and set id at which the last commit was observed, and the instant +/// at which the current round started. +struct LocalView { + round: Round, + set_id: SetId, + last_commit: Option<(N, Round, SetId)>, + round_start: Instant, +} + +impl LocalView { + /// Creates a new `LocalView` at the given set id and round. + fn new(set_id: SetId, round: Round) -> LocalView { + LocalView { set_id, round, last_commit: None, round_start: Instant::now() } + } + + /// Converts the local view to a `View` discarding round and set id + /// information about the last commit. + fn as_view(&self) -> View<&N> { + View { + round: self.round, + set_id: self.set_id, + last_commit: self.last_commit_height(), + last_update: None, + } + } + + /// Update the set ID. implies a reset to round 1. + fn update_set(&mut self, set_id: SetId) { + if set_id != self.set_id { + self.set_id = set_id; + self.round = Round(1); + self.round_start = Instant::now(); + } + } + + /// Updates the current round. + fn update_round(&mut self, round: Round) { + self.round = round; + self.round_start = Instant::now(); + } + + /// Returns the height of the block that the last observed commit finalizes. 
+ fn last_commit_height(&self) -> Option<&N> { + self.last_commit.as_ref().map(|(number, _, _)| number) + } +} + +const KEEP_RECENT_ROUNDS: usize = 3; + +/// Tracks gossip topics that we are keeping messages for. We keep topics of: +/// +/// - the last `KEEP_RECENT_ROUNDS` complete GRANDPA rounds, +/// +/// - the topic for the current and next round, +/// +/// - and a global topic for commit and catch-up messages. +struct KeepTopics { + current_set: SetId, + rounds: VecDeque<(Round, SetId)>, + reverse_map: AHashMap, SetId)>, +} + +impl KeepTopics { + fn new() -> Self { + KeepTopics { + current_set: SetId(0), + rounds: VecDeque::with_capacity(KEEP_RECENT_ROUNDS + 2), + reverse_map: Default::default(), + } + } + + fn push(&mut self, round: Round, set_id: SetId) { + self.current_set = std::cmp::max(self.current_set, set_id); + + // under normal operation the given round is already tracked (since we + // track one round ahead). if we skip rounds (with a catch up) the given + // round topic might not be tracked yet. + if !self.rounds.contains(&(round, set_id)) { + self.rounds.push_back((round, set_id)); + } + + // we also accept messages for the next round + self.rounds.push_back((Round(round.0.saturating_add(1)), set_id)); + + // the 2 is for the current and next round. + while self.rounds.len() > KEEP_RECENT_ROUNDS + 2 { + let _ = self.rounds.pop_front(); + } + + let mut map = AHashMap::with_capacity(KEEP_RECENT_ROUNDS + 3); + map.insert(super::global_topic::(self.current_set.0), (None, self.current_set)); + + for &(round, set) in &self.rounds { + map.insert(super::round_topic::(round.0, set.0), (Some(round), set)); + } + + self.reverse_map = map; + } + + fn topic_info(&self, topic: &B::Hash) -> Option<(Option, SetId)> { + self.reverse_map.get(topic).cloned() + } +} + +// topics to send to a neighbor based on their view. +fn neighbor_topics(view: &View>) -> Vec { + let s = view.set_id; + let mut topics = + vec![super::global_topic::(s.0), super::round_topic::(view.round.0, s.0)]; + + if view.round.0 != 0 { + let r = Round(view.round.0 - 1); + topics.push(super::round_topic::(r.0, s.0)) + } + + topics +} + +/// Grandpa gossip message type. +/// This is the root type that gets encoded and sent on the network. +#[derive(Debug, Encode, Decode)] +pub(super) enum GossipMessage { + /// Grandpa message with round and set info. + Vote(VoteMessage), + /// Grandpa commit message with round and set info. + Commit(FullCommitMessage), + /// A neighbor packet. Not repropagated. + Neighbor(VersionedNeighborPacket>), + /// Grandpa catch up request message with round and set info. Not repropagated. + CatchUpRequest(CatchUpRequestMessage), + /// Grandpa catch up message with round and set info. Not repropagated. + CatchUp(FullCatchUpMessage), +} + +impl From>> for GossipMessage { + fn from(neighbor: NeighborPacket>) -> Self { + GossipMessage::Neighbor(VersionedNeighborPacket::V1(neighbor)) + } +} + +/// Network level vote message with topic information. +#[derive(Debug, Encode, Decode)] +pub(super) struct VoteMessage { + /// The round this message is from. + pub(super) round: Round, + /// The voter set ID this message is from. + pub(super) set_id: SetId, + /// The message itself. + pub(super) message: SignedMessage, +} + +/// Network level commit message with topic information. +#[derive(Debug, Encode, Decode)] +pub(super) struct FullCommitMessage { + /// The round this message is from. + pub(super) round: Round, + /// The voter set ID this message is from. 
+ pub(super) set_id: SetId, + /// The compact commit message. + pub(super) message: CompactCommit, +} + +/// V1 neighbor packet. Neighbor packets are sent from nodes to their peers +/// and are not repropagated. These contain information about the node's state. +#[derive(Debug, Encode, Decode, Clone)] +pub(super) struct NeighborPacket { + /// The round the node is currently at. + pub(super) round: Round, + /// The set ID the node is currently at. + pub(super) set_id: SetId, + /// The highest finalizing commit observed. + pub(super) commit_finalized_height: N, +} + +/// A versioned neighbor packet. +#[derive(Debug, Encode, Decode)] +pub(super) enum VersionedNeighborPacket { + #[codec(index = 1)] + V1(NeighborPacket), +} + +impl VersionedNeighborPacket { + fn into_neighbor_packet(self) -> NeighborPacket { + match self { + VersionedNeighborPacket::V1(p) => p, + } + } +} + +/// A catch up request for a given round (or any further round) localized by set id. +#[derive(Clone, Debug, Encode, Decode)] +pub(super) struct CatchUpRequestMessage { + /// The round that we want to catch up to. + pub(super) round: Round, + /// The voter set ID this message is from. + pub(super) set_id: SetId, +} + +/// Network level catch up message with topic information. +#[derive(Debug, Encode, Decode)] +pub(super) struct FullCatchUpMessage { + /// The voter set ID this message is from. + pub(super) set_id: SetId, + /// The compact commit message. + pub(super) message: CatchUp, +} + +/// Misbehavior that peers can perform. +/// +/// `cost` gives a cost that can be used to perform cost/benefit analysis of a +/// peer. +#[derive(Clone, Copy, Debug, PartialEq)] +pub(super) enum Misbehavior { + // invalid neighbor message, considering the last one. + InvalidViewChange, + // duplicate neighbor message. + DuplicateNeighborMessage, + // could not decode neighbor message. bytes-length of the packet. + UndecodablePacket(i32), + // Bad catch up message (invalid signatures). + BadCatchUpMessage { signatures_checked: i32 }, + // Bad commit message + BadCommitMessage { signatures_checked: i32, blocks_loaded: i32, equivocations_caught: i32 }, + // A message received that's from the future relative to our view. + // always misbehavior. + FutureMessage, + // A message received that cannot be evaluated relative to our view. + // This happens before we have a view and have sent out neighbor packets. + // always misbehavior. 
+ OutOfScopeMessage, +} + +impl Misbehavior { + pub(super) fn cost(&self) -> ReputationChange { + use Misbehavior::*; + + match *self { + InvalidViewChange => cost::INVALID_VIEW_CHANGE, + DuplicateNeighborMessage => cost::DUPLICATE_NEIGHBOR_MESSAGE, + UndecodablePacket(bytes) => ReputationChange::new( + bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), + "Grandpa: Bad packet", + ), + BadCatchUpMessage { signatures_checked } => ReputationChange::new( + cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked), + "Grandpa: Bad cath-up message", + ), + BadCommitMessage { signatures_checked, blocks_loaded, equivocations_caught } => { + let cost = cost::PER_SIGNATURE_CHECKED + .saturating_mul(signatures_checked) + .saturating_add(cost::PER_BLOCK_LOADED.saturating_mul(blocks_loaded)); + + let benefit = equivocations_caught.saturating_mul(benefit::PER_EQUIVOCATION); + + ReputationChange::new( + (benefit as i32).saturating_add(cost as i32), + "Grandpa: Bad commit", + ) + }, + FutureMessage => cost::FUTURE_MESSAGE, + OutOfScopeMessage => cost::OUT_OF_SCOPE_MESSAGE, + } + } +} + +#[derive(Debug)] +struct PeerInfo { + view: View, + roles: ObservedRole, +} + +impl PeerInfo { + fn new(roles: ObservedRole) -> Self { + PeerInfo { view: View::default(), roles } + } +} + +/// The peers we're connected to in gossip. +struct Peers { + inner: AHashMap>, + /// The randomly picked set of `LUCKY_PEERS` we'll gossip to in the first stage of round + /// gossiping. + first_stage_peers: AHashSet, + /// The randomly picked set of peers we'll gossip to in the second stage of gossiping if the + /// first stage didn't allow us to spread the voting data enough to conclude the round. This + /// set should have size `sqrt(connected_peers)`. + second_stage_peers: HashSet, + /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. + lucky_light_peers: HashSet, + /// Neighbor packet rebroadcast period --- we reduce the reputation of peers sending duplicate + /// packets too often. + neighbor_rebroadcast_period: Duration, +} + +impl Peers { + fn new(neighbor_rebroadcast_period: Duration) -> Self { + Peers { + inner: Default::default(), + first_stage_peers: Default::default(), + second_stage_peers: Default::default(), + lucky_light_peers: Default::default(), + neighbor_rebroadcast_period, + } + } + + fn new_peer(&mut self, who: PeerId, role: ObservedRole) { + match role { + ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { + self.first_stage_peers.insert(who); + }, + ObservedRole::Authority if self.second_stage_peers.len() < LUCKY_PEERS => { + self.second_stage_peers.insert(who); + }, + ObservedRole::Light if self.lucky_light_peers.len() < LUCKY_PEERS => { + self.lucky_light_peers.insert(who); + }, + _ => {}, + } + + self.inner.insert(who, PeerInfo::new(role)); + } + + fn peer_disconnected(&mut self, who: &PeerId) { + self.inner.remove(who); + // This does not happen often enough compared to round duration, + // so we don't reshuffle. + self.first_stage_peers.remove(who); + self.second_stage_peers.remove(who); + self.lucky_light_peers.remove(who); + } + + // returns a reference to the new view, if the peer is known. 
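+ // A view update is rejected as `InvalidViewChange` if the set id moves
+ // backwards, the round moves backwards within the same set, or the
+ // commit-finalized height decreases.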
+ fn update_peer_state( + &mut self, + who: &PeerId, + update: NeighborPacket, + ) -> Result>, Misbehavior> { + let peer = match self.inner.get_mut(who) { + None => return Ok(None), + Some(p) => p, + }; + + let invalid_change = peer.view.set_id > update.set_id || + peer.view.round > update.round && peer.view.set_id == update.set_id || + peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + + if invalid_change { + return Err(Misbehavior::InvalidViewChange) + } + + let now = Instant::now(); + let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == + (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); + + if duplicate_packet { + if let Some(last_update) = peer.view.last_update { + if now < last_update + self.neighbor_rebroadcast_period / 2 { + return Err(Misbehavior::DuplicateNeighborMessage) + } + } + } + + peer.view = View { + round: update.round, + set_id: update.set_id, + last_commit: Some(update.commit_finalized_height), + last_update: Some(now), + }; + + trace!( + target: LOG_TARGET, + "Peer {} updated view. Now at {:?}, {:?}", + who, + peer.view.round, + peer.view.set_id + ); + + Ok(Some(&peer.view)) + } + + fn update_commit_height(&mut self, who: &PeerId, new_height: N) -> Result<(), Misbehavior> { + let peer = match self.inner.get_mut(who) { + None => return Ok(()), + Some(p) => p, + }; + + // this doesn't allow a peer to send us unlimited commits with the + // same height, because there is still a misbehavior condition based on + // sending commits that are <= the best we are aware of. + if peer.view.last_commit.as_ref() > Some(&new_height) { + return Err(Misbehavior::InvalidViewChange) + } + + peer.view.last_commit = Some(new_height); + + Ok(()) + } + + fn peer<'a>(&'a self, who: &PeerId) -> Option<&'a PeerInfo> { + self.inner.get(who) + } + + fn reshuffle(&mut self) { + // we want to randomly select peers into three sets according to the following logic: + // - first set: LUCKY_PEERS random peers where at least LUCKY_PEERS/2 are authorities + // (unless + // we're not connected to that many authorities) + // - second set: max(LUCKY_PEERS, sqrt(peers)) peers where at least LUCKY_PEERS are + // authorities. + // - third set: LUCKY_PEERS random light client peers + + let shuffled_peers = { + let mut peers = + self.inner.iter().map(|(peer_id, info)| (*peer_id, info)).collect::>(); + + peers.shuffle(&mut rand::thread_rng()); + peers + }; + + let shuffled_authorities = shuffled_peers.iter().filter_map(|(peer_id, info)| { + if matches!(info.roles, ObservedRole::Authority) { + Some(peer_id) + } else { + None + } + }); + + let mut first_stage_peers = AHashSet::new(); + let mut second_stage_peers = HashSet::new(); + + // we start by allocating authorities to the first stage set and when the minimum of + // `LUCKY_PEERS / 2` is filled we start allocating to the second stage set. + let half_lucky = LUCKY_PEERS / 2; + let one_and_a_half_lucky = LUCKY_PEERS + half_lucky; + for (n_authorities_added, peer_id) in shuffled_authorities.enumerate() { + if n_authorities_added < half_lucky { + first_stage_peers.insert(*peer_id); + } else if n_authorities_added < one_and_a_half_lucky { + second_stage_peers.insert(*peer_id); + } else { + break + } + } + + // fill up first and second sets with remaining peers (either full or authorities) + // prioritizing filling the first set over the second. 
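+ // the second stage set is sized max(LUCKY_PEERS, sqrt(#peers)), e.g. 100
+ // connected peers give max(4, 10) = 10 slots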
+ let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); + for (peer_id, info) in &shuffled_peers { + if info.roles.is_light() { + continue + } + + if first_stage_peers.len() < LUCKY_PEERS { + first_stage_peers.insert(*peer_id); + second_stage_peers.remove(peer_id); + } else if second_stage_peers.len() < n_second_stage_peers { + if !first_stage_peers.contains(peer_id) { + second_stage_peers.insert(*peer_id); + } + } else { + break + } + } + + // pick `LUCKY_PEERS` random light peers + let lucky_light_peers = shuffled_peers + .into_iter() + .filter_map(|(peer_id, info)| if info.roles.is_light() { Some(peer_id) } else { None }) + .take(LUCKY_PEERS) + .collect(); + + self.first_stage_peers = first_stage_peers; + self.second_stage_peers = second_stage_peers; + self.lucky_light_peers = lucky_light_peers; + } +} + +#[derive(Debug, PartialEq)] +pub(super) enum Action { + // repropagate under given topic, to the given peers, applying cost/benefit to originator. + Keep(H, ReputationChange), + // discard and process. + ProcessAndDiscard(H, ReputationChange), + // discard, applying cost/benefit to originator. + Discard(ReputationChange), +} + +/// State of catch up request handling. +#[derive(Debug)] +enum PendingCatchUp { + /// No pending catch up requests. + None, + /// Pending catch up request which has not been answered yet. + Requesting { who: PeerId, request: CatchUpRequestMessage, instant: Instant }, + /// Pending catch up request that was answered and is being processed. + Processing { instant: Instant }, +} + +/// Configuration for the round catch-up mechanism. +enum CatchUpConfig { + /// Catch requests are enabled, our node will issue them whenever it sees a + /// neighbor packet for a round further than `CATCH_UP_THRESHOLD`. If + /// `only_from_authorities` is set, the node will only send catch-up + /// requests to other authorities it is connected to. This is useful if the + /// GRANDPA observer protocol is live on the network, in which case full + /// nodes (non-authorities) don't have the necessary round data to answer + /// catch-up requests. + Enabled { only_from_authorities: bool }, + /// Catch-up requests are disabled, our node will never issue them. This is + /// useful for the GRANDPA observer mode, where we are only interested in + /// commit messages and don't need to follow the full round protocol. + Disabled, +} + +impl CatchUpConfig { + fn enabled(only_from_authorities: bool) -> CatchUpConfig { + CatchUpConfig::Enabled { only_from_authorities } + } + + fn disabled() -> CatchUpConfig { + CatchUpConfig::Disabled + } + + fn request_allowed(&self, peer: &PeerInfo) -> bool { + match self { + CatchUpConfig::Disabled => false, + CatchUpConfig::Enabled { only_from_authorities, .. } => match peer.roles { + ObservedRole::Authority => true, + ObservedRole::Light => false, + ObservedRole::Full => !only_from_authorities, + }, + } + } +} + +struct Inner { + local_view: Option>>, + peers: Peers>, + live_topics: KeepTopics, + authorities: Vec, + config: crate::Config, + next_rebroadcast: Instant, + pending_catch_up: PendingCatchUp, + catch_up_config: CatchUpConfig, +} + +type MaybeMessage = Option<(Vec, NeighborPacket>)>; + +impl Inner { + fn new(config: crate::Config) -> Self { + let catch_up_config = if config.observer_enabled { + if config.local_role.is_authority() { + // since the observer protocol is enabled, we will only issue + // catch-up requests if we are an authority (and only to other + // authorities). 
+ CatchUpConfig::enabled(true) + } else { + // otherwise, we are running the observer protocol and don't + // care about catch-up requests. + CatchUpConfig::disabled() + } + } else { + // if the observer protocol isn't enabled and we're not a light client, then any full + // node should be able to answer catch-up requests. + CatchUpConfig::enabled(false) + }; + + Inner { + local_view: None, + peers: Peers::new(NEIGHBOR_REBROADCAST_PERIOD), + live_topics: KeepTopics::new(), + next_rebroadcast: Instant::now() + REBROADCAST_AFTER, + authorities: Vec::new(), + pending_catch_up: PendingCatchUp::None, + catch_up_config, + config, + } + } + + /// Note a round in the current set has started. Does nothing if the last + /// call to the function was with the same `round`. + fn note_round(&mut self, round: Round) -> MaybeMessage { + let local_view = self.local_view.as_mut()?; + if local_view.round == round { + // Do not send neighbor packets out if `round` has not changed --- + // such behavior is punishable. + return None + } + + let set_id = local_view.set_id; + + debug!( + target: LOG_TARGET, + "Voter {} noting beginning of round {:?} to network.", + self.config.name(), + (round, set_id) + ); + + local_view.update_round(round); + + self.live_topics.push(round, set_id); + self.peers.reshuffle(); + + self.multicast_neighbor_packet(false) + } + + /// Note that a voter set with given ID has started. Does nothing if the last + /// call to the function was with the same `set_id`. + fn note_set(&mut self, set_id: SetId, authorities: Vec) -> MaybeMessage { + let local_view = match self.local_view { + ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), + Some(ref mut v) => { + if v.set_id == set_id { + let diff_authorities = self.authorities.iter().collect::>() != + authorities.iter().collect::>(); + + if diff_authorities { + debug!( + target: LOG_TARGET, + "Gossip validator noted set {:?} twice with different authorities. \ + Was the authority set hard forked?", + set_id, + ); + + self.authorities = authorities; + } + + // Do not send neighbor packets out if the `set_id` has not changed --- + // such behavior is punishable. + return None + } else { + v + } + }, + }; + + local_view.update_set(set_id); + self.live_topics.push(Round(1), set_id); + self.authorities = authorities; + + // when transitioning to a new set we also want to send neighbor packets to light clients, + // this is so that they know who to ask justifications from in order to finalize the last + // block in the previous set. + self.multicast_neighbor_packet(true) + } + + /// Note that we've imported a commit finalizing a given block. Does nothing if the last + /// call to the function was with the same or higher `finalized` number. + /// `set_id` & `round` are the ones the commit message is from. 
+ fn note_commit_finalized( + &mut self, + round: Round, + set_id: SetId, + finalized: NumberFor, + ) -> MaybeMessage { + let local_view = self.local_view.as_mut()?; + if local_view.last_commit_height() < Some(&finalized) { + local_view.last_commit = Some((finalized, round, set_id)); + } else { + return None + } + + self.multicast_neighbor_packet(false) + } + + fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { + self.local_view + .as_ref() + .map(LocalView::as_view) + .map(|v| v.consider_vote(round, set_id)) + .unwrap_or(Consider::RejectOutOfScope) + } + + fn consider_global(&self, set_id: SetId, number: NumberFor) -> Consider { + self.local_view + .as_ref() + .map(LocalView::as_view) + .map(|v| v.consider_global(set_id, &number)) + .unwrap_or(Consider::RejectOutOfScope) + } + + fn cost_past_rejection( + &self, + _who: &PeerId, + _round: Round, + _set_id: SetId, + ) -> ReputationChange { + // hardcoded for now. + cost::PAST_REJECTION + } + + fn validate_round_message( + &self, + who: &PeerId, + full: &VoteMessage, + ) -> Action { + match self.consider_vote(full.round, full.set_id) { + Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectPast => + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), + Consider::Accept => {}, + } + + // ensure authority is part of the set. + if !self.authorities.contains(&full.message.id) { + debug!(target: LOG_TARGET, "Message from unknown voter: {}", full.message.id); + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.bad_msg_signature"; + "signature" => ?full.message.id, + ); + return Action::Discard(cost::UNKNOWN_VOTER) + } + + if !sp_consensus_grandpa::check_message_signature( + &full.message.message, + &full.message.id, + &full.message.signature, + full.round.0, + full.set_id.0, + ) { + debug!(target: LOG_TARGET, "Bad message signature {}", full.message.id); + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.bad_msg_signature"; + "signature" => ?full.message.id, + ); + return Action::Discard(cost::BAD_SIGNATURE) + } + + let topic = super::round_topic::(full.round.0, full.set_id.0); + Action::Keep(topic, benefit::ROUND_MESSAGE) + } + + fn validate_commit_message( + &mut self, + who: &PeerId, + full: &FullCommitMessage, + ) -> Action { + if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { + return Action::Discard(misbehavior.cost()) + } + + match self.consider_global(full.set_id, full.message.target_number) { + Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), + Consider::RejectPast => + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::Accept => {}, + } + + if full.message.precommits.len() != full.message.auth_data.len() || + full.message.precommits.is_empty() + { + debug!(target: LOG_TARGET, "Malformed compact commit"); + telemetry!( + self.config.telemetry; + CONSENSUS_DEBUG; + "afg.malformed_compact_commit"; + "precommits_len" => ?full.message.precommits.len(), + "auth_data_len" => ?full.message.auth_data.len(), + "precommits_is_empty" => ?full.message.precommits.is_empty(), + ); + return Action::Discard(cost::MALFORMED_COMMIT) + } + + // always discard commits initially and rebroadcast after doing full + // checking. 
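+ // (`ProcessAndDiscard` passes the commit on for processing under the global
+ // topic without keeping it in the gossip store for repropagation)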
+ let topic = super::global_topic::(full.set_id.0); + Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_COMMIT) + } + + fn validate_catch_up_message( + &mut self, + who: &PeerId, + full: &FullCatchUpMessage, + ) -> Action { + match &self.pending_catch_up { + PendingCatchUp::Requesting { who: peer, request, instant } => { + if peer != who { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + } + + if request.set_id != full.set_id { + return Action::Discard(cost::MALFORMED_CATCH_UP) + } + + if request.round.0 > full.message.round_number { + return Action::Discard(cost::MALFORMED_CATCH_UP) + } + + if full.message.prevotes.is_empty() || full.message.precommits.is_empty() { + return Action::Discard(cost::MALFORMED_CATCH_UP) + } + + // move request to pending processing state, we won't push out + // any catch up requests until we import this one (either with a + // success or failure). + self.pending_catch_up = PendingCatchUp::Processing { instant: *instant }; + + // always discard catch up messages, they're point-to-point + let topic = super::global_topic::(full.set_id.0); + Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_CATCH_UP) + }, + _ => Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + } + } + + fn note_catch_up_message_processed(&mut self) { + match &self.pending_catch_up { + PendingCatchUp::Processing { .. } => { + self.pending_catch_up = PendingCatchUp::None; + }, + state => debug!( + target: LOG_TARGET, + "Noted processed catch up message when state was: {:?}", state, + ), + } + } + + fn handle_catch_up_request( + &mut self, + who: &PeerId, + request: CatchUpRequestMessage, + set_state: &environment::SharedVoterSetState, + ) -> (Option>, Action) { + let local_view = match self.local_view { + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + Some(ref view) => view, + }; + + if request.set_id != local_view.set_id { + // NOTE: When we're close to a set change there is potentially a + // race where the peer sent us the request before it observed that + // we had transitioned to a new set. In this case we charge a lower + // cost. + if request.set_id.0.saturating_add(1) == local_view.set_id.0 && + local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 + { + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)) + } + + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + } + + match self.peers.peer(who) { + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + Some(peer) if peer.view.round >= request.round => + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + _ => {}, + } + + let last_completed_round = set_state.read().last_completed_round(); + if last_completed_round.number < request.round.0 { + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + } + + trace!( + target: LOG_TARGET, + "Replying to catch-up request for round {} from {} with round {}", + request.round.0, + who, + last_completed_round.number, + ); + + let mut prevotes = Vec::new(); + let mut precommits = Vec::new(); + + // NOTE: the set of votes stored in `LastCompletedRound` is a minimal + // set of votes, i.e. at most one equivocation is stored per voter. The + // code below assumes this invariant is maintained when creating the + // catch up reply since peers won't accept catch-up messages that have + // too many equivocations (we exceed the fault-tolerance bound). 
+ for vote in last_completed_round.votes { + match vote.message { + finality_grandpa::Message::Prevote(prevote) => { + prevotes.push(finality_grandpa::SignedPrevote { + prevote, + signature: vote.signature, + id: vote.id, + }); + }, + finality_grandpa::Message::Precommit(precommit) => { + precommits.push(finality_grandpa::SignedPrecommit { + precommit, + signature: vote.signature, + id: vote.id, + }); + }, + _ => {}, + } + } + + let (base_hash, base_number) = last_completed_round.base; + + let catch_up = CatchUp:: { + round_number: last_completed_round.number, + prevotes, + precommits, + base_hash, + base_number, + }; + + let full_catch_up = GossipMessage::CatchUp::(FullCatchUpMessage { + set_id: request.set_id, + message: catch_up, + }); + + (Some(full_catch_up), Action::Discard(cost::CATCH_UP_REPLY)) + } + + fn try_catch_up(&mut self, who: &PeerId) -> (Option>, Option) { + let mut catch_up = None; + let mut report = None; + + // if the peer is on the same set and ahead of us by a margin bigger + // than `CATCH_UP_THRESHOLD` then we should ask it for a catch up + // message. we only send catch-up requests to authorities, observers + // won't be able to reply since they don't follow the full GRANDPA + // protocol and therefore might not have the vote data available. + if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { + if self.catch_up_config.request_allowed(peer) && + peer.view.set_id == local_view.set_id && + peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 + { + // send catch up request if allowed + let round = peer.view.round.0 - 1; // peer.view.round is > 0 + let request = + CatchUpRequestMessage { set_id: peer.view.set_id, round: Round(round) }; + + let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); + + if catch_up_allowed { + debug!( + target: LOG_TARGET, + "Sending catch-up request for round {} to {}", round, who, + ); + + catch_up = Some(GossipMessage::::CatchUpRequest(request)); + } + + report = catch_up_report; + } + } + + (catch_up, report) + } + + fn import_neighbor_message( + &mut self, + who: &PeerId, + update: NeighborPacket>, + ) -> (Vec, Action, Option>, Option) { + let update_res = self.peers.update_peer_state(who, update); + + let (cost_benefit, topics) = match update_res { + Ok(view) => + (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), + Err(misbehavior) => (misbehavior.cost(), None), + }; + + let (catch_up, report) = match update_res { + Ok(_) => self.try_catch_up(who), + _ => (None, None), + }; + + let neighbor_topics = topics.unwrap_or_default(); + + // always discard neighbor messages, it's only valid for one hop. + let action = Action::Discard(cost_benefit); + + (neighbor_topics, action, catch_up, report) + } + + fn multicast_neighbor_packet(&self, force_light: bool) -> MaybeMessage { + self.local_view.as_ref().map(|local_view| { + let packet = NeighborPacket { + round: local_view.round, + set_id: local_view.set_id, + commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()), + }; + + let peers = self + .peers + .inner + .iter() + .filter_map(|(id, info)| { + // light clients don't participate in the full GRANDPA voter protocol + // and therefore don't need to be informed about all view updates unless + // we explicitly require it (e.g. 
when transitioning to a new set) + if info.roles.is_light() && !force_light { + None + } else { + Some(id) + } + }) + .cloned() + .collect(); + + (peers, packet) + }) + } + + fn note_catch_up_request( + &mut self, + who: &PeerId, + catch_up_request: &CatchUpRequestMessage, + ) -> (bool, Option) { + let report = match &self.pending_catch_up { + PendingCatchUp::Requesting { who: peer, instant, .. } => { + if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { + return (false, None) + } else { + // report peer for timeout + Some((*peer, cost::CATCH_UP_REQUEST_TIMEOUT)) + } + }, + PendingCatchUp::Processing { instant, .. } => { + if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { + return (false, None) + } else { + None + } + }, + _ => None, + }; + + self.pending_catch_up = PendingCatchUp::Requesting { + who: *who, + request: catch_up_request.clone(), + instant: Instant::now(), + }; + + (true, report) + } + + /// The initial logic for filtering round messages follows the given state + /// transitions: + /// + /// - State 1: allowed to LUCKY_PEERS random peers (where at least LUCKY_PEERS/2 are + /// authorities) + /// - State 2: allowed to max(LUCKY_PEERS, sqrt(random peers)) (where at least LUCKY_PEERS are + /// authorities) + /// - State 3: allowed to all peers + /// + /// Transitions will be triggered on repropagation attempts by the underlying gossip layer. + fn round_message_allowed(&self, who: &PeerId) -> bool { + let round_duration = self.config.gossip_duration * ROUND_DURATION; + let round_elapsed = match self.local_view { + Some(ref local_view) => local_view.round_start.elapsed(), + None => return false, + }; + + if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { + self.peers.first_stage_peers.contains(who) + } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) + } else { + self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false) + } + } + + /// The initial logic for filtering global messages follows the given state + /// transitions: + /// + /// - State 1: allowed to max(LUCKY_PEERS, sqrt(peers)) (where at least LUCKY_PEERS are + /// authorities) + /// - State 2: allowed to all peers + /// + /// We are more lenient with global messages since there should be a lot + /// less global messages than round messages (just commits), and we want + /// these to propagate to non-authorities fast enough so that they can + /// observe finality. + /// + /// Transitions will be triggered on repropagation attempts by the + /// underlying gossip layer, which should happen every 30 seconds. + fn global_message_allowed(&self, who: &PeerId) -> bool { + let round_duration = self.config.gossip_duration * ROUND_DURATION; + let round_elapsed = match self.local_view { + Some(ref local_view) => local_view.round_start.elapsed(), + None => return false, + }; + + if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) || + self.peers.lucky_light_peers.contains(who) + } else { + true + } + } +} + +// Prometheus metrics for [`GossipValidator`]. 
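// `round_message_allowed`/`global_message_allowed` above widen propagation in
// stages as the round ages. A rough, self-contained sketch of the stage
// selection; the fractions of the round duration used here stand in for the
// `PROPAGATION_SOME`/`PROPAGATION_ALL` constants defined elsewhere in this
// module and should be treated as assumptions for illustration.
use std::time::Duration;

#[derive(Debug, PartialEq)]
enum PropagationStageSketch { FirstStagePeers, FirstAndSecondStagePeers, AllNonLightPeers }

fn propagation_stage_sketch(
    round_elapsed: Duration,
    round_duration: Duration,
) -> PropagationStageSketch {
    const PROPAGATION_SOME: f32 = 1.5; // assumed multiple of the round duration
    const PROPAGATION_ALL: f32 = 3.0; // assumed multiple of the round duration

    if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) {
        PropagationStageSketch::FirstStagePeers
    } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) {
        PropagationStageSketch::FirstAndSecondStagePeers
    } else {
        PropagationStageSketch::AllNonLightPeers
    }
}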
+pub(crate) struct Metrics { + messages_validated: CounterVec, +} + +impl Metrics { + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { + Ok(Self { + messages_validated: register( + CounterVec::new( + Opts::new( + "substrate_finality_grandpa_communication_gossip_validator_messages", + "Number of messages validated by the finality grandpa gossip validator.", + ), + &["message", "action"], + )?, + registry, + )?, + }) + } +} + +/// A validator for GRANDPA gossip messages. +pub(super) struct GossipValidator { + inner: parking_lot::RwLock>, + set_state: environment::SharedVoterSetState, + report_sender: TracingUnboundedSender, + metrics: Option, + telemetry: Option, +} + +impl GossipValidator { + /// Create a new gossip-validator. The current set is initialized to 0. If + /// `catch_up_enabled` is set to false then the validator will not issue any + /// catch up requests (useful e.g. when running just the GRANDPA observer). + pub(super) fn new( + config: crate::Config, + set_state: environment::SharedVoterSetState, + prometheus_registry: Option<&Registry>, + telemetry: Option, + ) -> (GossipValidator, TracingUnboundedReceiver) { + let metrics = match prometheus_registry.map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + }, + None => None, + }; + + let (tx, rx) = tracing_unbounded("mpsc_grandpa_gossip_validator", 100_000); + let val = GossipValidator { + inner: parking_lot::RwLock::new(Inner::new(config)), + set_state, + report_sender: tx, + metrics, + telemetry, + }; + + (val, rx) + } + + /// Note a round in the current set has started. + pub(super) fn note_round(&self, round: Round, send_neighbor: F) + where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self.inner.write().note_round(round); + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that a voter set with given ID has started. Updates the current set to given + /// value and initializes the round to 0. + pub(super) fn note_set(&self, set_id: SetId, authorities: Vec, send_neighbor: F) + where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self.inner.write().note_set(set_id, authorities); + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that we've imported a commit finalizing a given block. + /// `set_id` & `round` are the ones the commit message is from and not necessarily + /// the latest set ID & round started. + pub(super) fn note_commit_finalized( + &self, + round: Round, + set_id: SetId, + finalized: NumberFor, + send_neighbor: F, + ) where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self.inner.write().note_commit_finalized(round, set_id, finalized); + + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that we've processed a catch up message. + pub(super) fn note_catch_up_message_processed(&self) { + self.inner.write().note_catch_up_message_processed(); + } + + fn report(&self, who: PeerId, cost_benefit: ReputationChange) { + let _ = self.report_sender.unbounded_send(PeerReport { who, cost_benefit }); + } + + pub(super) fn do_validate( + &self, + who: &PeerId, + mut data: &[u8], + ) -> (Action, Vec, Option>) { + let mut broadcast_topics = Vec::new(); + let mut peer_reply = None; + + // Message name for Prometheus metric recording. 
+ let message_name; + + let action = { + match GossipMessage::::decode(&mut data) { + Ok(GossipMessage::Vote(ref message)) => { + message_name = Some("vote"); + self.inner.write().validate_round_message(who, message) + }, + Ok(GossipMessage::Commit(ref message)) => { + message_name = Some("commit"); + self.inner.write().validate_commit_message(who, message) + }, + Ok(GossipMessage::Neighbor(update)) => { + message_name = Some("neighbor"); + let (topics, action, catch_up, report) = self + .inner + .write() + .import_neighbor_message(who, update.into_neighbor_packet()); + + if let Some((peer, cost_benefit)) = report { + self.report(peer, cost_benefit); + } + + broadcast_topics = topics; + peer_reply = catch_up; + action + }, + Ok(GossipMessage::CatchUp(ref message)) => { + message_name = Some("catch_up"); + self.inner.write().validate_catch_up_message(who, message) + }, + Ok(GossipMessage::CatchUpRequest(request)) => { + message_name = Some("catch_up_request"); + let (reply, action) = + self.inner.write().handle_catch_up_request(who, request, &self.set_state); + + peer_reply = reply; + action + }, + Err(e) => { + message_name = None; + debug!(target: LOG_TARGET, "Error decoding message: {}", e); + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.err_decoding_msg"; + "" => "", + ); + + let len = std::cmp::min(i32::MAX as usize, data.len()) as i32; + Action::Discard(Misbehavior::UndecodablePacket(len).cost()) + }, + } + }; + + // Prometheus metric recording. + if let (Some(metrics), Some(message_name)) = (&self.metrics, message_name) { + let action_name = match action { + Action::Keep(_, _) => "keep", + Action::ProcessAndDiscard(_, _) => "process_and_discard", + Action::Discard(_) => "discard", + }; + metrics.messages_validated.with_label_values(&[message_name, action_name]).inc(); + } + + (action, broadcast_topics, peer_reply) + } + + #[cfg(test)] + fn inner(&self) -> &parking_lot::RwLock> { + &self.inner + } +} + +impl sc_network_gossip::Validator for GossipValidator { + fn new_peer( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + roles: ObservedRole, + ) { + let packet = { + let mut inner = self.inner.write(); + inner.peers.new_peer(*who, roles); + + inner.local_view.as_ref().map(|v| NeighborPacket { + round: v.round, + set_id: v.set_id, + commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), + }) + }; + + if let Some(packet) = packet { + let packet_data = GossipMessage::::from(packet).encode(); + context.send_message(who, packet_data); + } + } + + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, who: &PeerId) { + self.inner.write().peers.peer_disconnected(who); + } + + fn validate( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + data: &[u8], + ) -> sc_network_gossip::ValidationResult { + let (action, broadcast_topics, peer_reply) = self.do_validate(who, data); + + // not with lock held! 
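// In the decode-error branch of `do_validate` above, the reputation cost of an
// undecodable packet scales with its length (capped at `i32::MAX`). A small
// stand-alone restatement of that computation; the per-byte scalar is an
// assumption standing in for `cost::PER_UNDECODABLE_BYTE`.
fn undecodable_packet_cost_sketch(packet_len: usize) -> i32 {
    const PER_UNDECODABLE_BYTE: i32 = -5; // assumed value, for illustration
    let len = std::cmp::min(i32::MAX as usize, packet_len) as i32;
    len.saturating_mul(PER_UNDECODABLE_BYTE)
}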
+ if let Some(msg) = peer_reply { + context.send_message(who, msg.encode()); + } + + for topic in broadcast_topics { + context.send_topic(who, topic, false); + } + + match action { + Action::Keep(topic, cb) => { + self.report(*who, cb); + context.broadcast_message(topic, data.to_vec(), false); + sc_network_gossip::ValidationResult::ProcessAndKeep(topic) + }, + Action::ProcessAndDiscard(topic, cb) => { + self.report(*who, cb); + sc_network_gossip::ValidationResult::ProcessAndDiscard(topic) + }, + Action::Discard(cb) => { + self.report(*who, cb); + sc_network_gossip::ValidationResult::Discard + }, + } + } + + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + let (inner, do_rebroadcast) = { + use parking_lot::RwLockWriteGuard; + + let mut inner = self.inner.write(); + let now = Instant::now(); + let do_rebroadcast = if now >= inner.next_rebroadcast { + inner.next_rebroadcast = now + REBROADCAST_AFTER; + true + } else { + false + }; + + // downgrade to read-lock. + (RwLockWriteGuard::downgrade(inner), do_rebroadcast) + }; + + Box::new(move |who, intent, topic, mut data| { + if let MessageIntent::PeriodicRebroadcast = intent { + return do_rebroadcast + } + + let peer = match inner.peers.peer(who) { + None => return false, + Some(x) => x, + }; + + // if the topic is not something we're keeping at the moment, + // do not send. + let (maybe_round, set_id) = match inner.live_topics.topic_info(topic) { + None => return false, + Some(x) => x, + }; + + if let MessageIntent::Broadcast = intent { + if maybe_round.is_some() { + if !inner.round_message_allowed(who) { + // early return if the vote message isn't allowed at this stage. + return false + } + } else if !inner.global_message_allowed(who) { + // early return if the global message isn't allowed at this stage. + return false + } + } + + // if the topic is not something the peer accepts, discard. + if let Some(round) = maybe_round { + return peer.view.consider_vote(round, set_id) == Consider::Accept + } + + // global message. + let local_view = match inner.local_view { + Some(ref v) => v, + None => return false, // cannot evaluate until we have a local view. + }; + + match GossipMessage::::decode(&mut data) { + Err(_) => false, + Ok(GossipMessage::Commit(full)) => { + // we only broadcast commit messages if they're for the same + // set the peer is in and if the commit is better than the + // last received by peer, additionally we make sure to only + // broadcast our best commit. + peer.view.consider_global(set_id, full.message.target_number) == + Consider::Accept && Some(&full.message.target_number) == + local_view.last_commit_height() + }, + Ok(GossipMessage::Neighbor(_)) => false, + Ok(GossipMessage::CatchUpRequest(_)) => false, + Ok(GossipMessage::CatchUp(_)) => false, + Ok(GossipMessage::Vote(_)) => false, // should not be the case. + } + }) + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + let inner = self.inner.read(); + Box::new(move |topic, mut data| { + // if the topic is not one of the ones that we are keeping at the moment, + // it is expired. + match inner.live_topics.topic_info(&topic) { + None => return true, + // round messages don't require further checking. + Some((Some(_), _)) => return false, + Some((None, _)) => {}, + }; + + let local_view = match inner.local_view { + Some(ref v) => v, + None => return true, // no local view means we can't evaluate or hold any topic. + }; + + // global messages -- only keep the best commit. 
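// The expiry rule for global (commit) messages that follows keeps only the
// single best commit we have observed locally. A simplified predicate
// equivalent to that match, using plain integers instead of the crate's
// types (illustration only):
fn commit_expired_sketch(
    // our best commit as (target number, round, set id), if any.
    local_best_commit: Option<(u64, u64, u64)>,
    // the gossiped commit's (target number, round, set id).
    message_commit: (u64, u64, u64),
) -> bool {
    match local_best_commit {
        // expire anything that is not exactly our best commit.
        Some(best) => best != message_commit,
        // with no local commit observed yet, everything is considered expired.
        None => true,
    }
}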
+ match GossipMessage::::decode(&mut data) { + Err(_) => true, + Ok(GossipMessage::Commit(full)) => match local_view.last_commit { + Some((number, round, set_id)) => + // we expire any commit message that doesn't target the same block + // as our best commit or isn't from the same round and set id + !(full.message.target_number == number && + full.round == round && full.set_id == set_id), + None => true, + }, + Ok(_) => true, + } + }) + } +} + +/// Report specifying a reputation change for a given peer. +pub(super) struct PeerReport { + pub who: PeerId, + pub cost_benefit: ReputationChange, +} + +#[cfg(test)] +mod tests { + use super::{super::NEIGHBOR_REBROADCAST_PERIOD, environment::SharedVoterSetState, *}; + use crate::communication; + use sc_network::config::Role; + use sc_network_gossip::Validator as GossipValidatorT; + use sp_core::{crypto::UncheckedFrom, H256}; + use std::time::Instant; + use substrate_test_runtime_client::runtime::{Block, Header}; + + // some random config (not really needed) + fn config() -> crate::Config { + crate::Config { + gossip_duration: Duration::from_millis(10), + justification_generation_period: 256, + keystore: None, + name: None, + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: communication::grandpa_protocol_name::NAME.into(), + } + } + + // dummy voter set state + fn voter_set_state() -> SharedVoterSetState { + use crate::{authorities::AuthoritySet, environment::VoterSetState}; + + let base = (H256::zero(), 0); + + let voters = vec![(AuthorityId::unchecked_from([1; 32]), 1)]; + let voters = AuthoritySet::genesis(voters).unwrap(); + + let set_state = VoterSetState::live(0, &voters, base); + + set_state.into() + } + + #[test] + fn view_vote_rules() { + let view = View { + round: Round(100), + set_id: SetId(1), + last_commit: Some(1000u64), + last_update: None, + }; + + assert_eq!(view.consider_vote(Round(98), SetId(1)), Consider::RejectPast); + assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); + assert_eq!(view.consider_vote(Round(1000), SetId(0)), Consider::RejectPast); + + assert_eq!(view.consider_vote(Round(99), SetId(1)), Consider::Accept); + assert_eq!(view.consider_vote(Round(100), SetId(1)), Consider::Accept); + assert_eq!(view.consider_vote(Round(101), SetId(1)), Consider::Accept); + + assert_eq!(view.consider_vote(Round(102), SetId(1)), Consider::RejectFuture); + assert_eq!(view.consider_vote(Round(1), SetId(2)), Consider::RejectFuture); + assert_eq!(view.consider_vote(Round(1000), SetId(2)), Consider::RejectFuture); + } + + #[test] + fn view_global_message_rules() { + let view = View { + round: Round(100), + set_id: SetId(2), + last_commit: Some(1000u64), + last_update: None, + }; + + assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); + assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); + assert_eq!(view.consider_global(SetId(3), 10000), Consider::RejectFuture); + + assert_eq!(view.consider_global(SetId(1), 1), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(1), 1000), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(1), 10000), Consider::RejectPast); + + assert_eq!(view.consider_global(SetId(2), 1), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(2), 1000), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(2), 1001), Consider::Accept); + assert_eq!(view.consider_global(SetId(2), 10000), Consider::Accept); + } + + #[test] + fn unknown_peer_cannot_be_updated() { + let mut peers = 
Peers::new(NEIGHBOR_REBROADCAST_PERIOD); + let id = PeerId::random(); + + let update = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50 }; + + let res = peers.update_peer_state(&id, update.clone()); + assert!(res.unwrap().is_none()); + + // connect & disconnect. + peers.new_peer(id, ObservedRole::Authority); + peers.peer_disconnected(&id); + + let res = peers.update_peer_state(&id, update.clone()); + assert!(res.unwrap().is_none()); + } + + #[test] + fn update_peer_state() { + let update1 = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50u32 }; + + let update2 = + NeighborPacket { round: Round(6), set_id: SetId(10), commit_finalized_height: 60 }; + + let update3 = + NeighborPacket { round: Round(2), set_id: SetId(11), commit_finalized_height: 61 }; + + let update4 = + NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; + + // Use shorter rebroadcast period to safely roll the clock back in the last test + // and don't hit the system boot time on systems with unsigned time. + const SHORT_NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(1); + let mut peers = Peers::new(SHORT_NEIGHBOR_REBROADCAST_PERIOD); + let id = PeerId::random(); + + peers.new_peer(id, ObservedRole::Authority); + + let check_update = |peers: &mut Peers<_>, update: NeighborPacket<_>| { + let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); + assert_eq!(view.round, update.round); + assert_eq!(view.set_id, update.set_id); + assert_eq!(view.last_commit, Some(update.commit_finalized_height)); + }; + + check_update(&mut peers, update1); + check_update(&mut peers, update2); + check_update(&mut peers, update3); + check_update(&mut peers, update4.clone()); + + // Allow duplicate neighbor packets if enough time has passed. + peers.inner.get_mut(&id).unwrap().view.last_update = + Some(Instant::now() - SHORT_NEIGHBOR_REBROADCAST_PERIOD); + check_update(&mut peers, update4); + } + + #[test] + fn invalid_view_change() { + let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); + + let id = PeerId::random(); + peers.new_peer(id, ObservedRole::Authority); + + peers + .update_peer_state( + &id, + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + ) + .unwrap() + .unwrap(); + + let mut check_update = move |update: NeighborPacket<_>, misbehavior| { + let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); + assert_eq!(err, misbehavior); + }; + + // round moves backwards. + check_update( + NeighborPacket { round: Round(9), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); + // set ID moves backwards. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(9), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // duplicate packet without grace period. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::DuplicateNeighborMessage, + ); + // commit finalized height moves backwards while round moves forward. + check_update( + NeighborPacket { round: Round(11), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards while set ID moves forward. 
+ check_update( + NeighborPacket { round: Round(10), set_id: SetId(11), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + } + + #[test] + fn messages_not_expired_immediately() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + let set_id = 1; + + val.note_set(SetId(set_id), Vec::new(), |_, _| {}); + + for round_num in 1u64..10 { + val.note_round(Round(round_num), |_, _| {}); + } + + { + let mut is_expired = val.message_expired(); + let last_kept_round = 10u64 - KEEP_RECENT_ROUNDS as u64 - 1; + + // messages from old rounds are expired. + for round_num in 1u64..last_kept_round { + let topic = communication::round_topic::(round_num, 1); + assert!(is_expired(topic, &[1, 2, 3])); + } + + // messages from not-too-old rounds are not expired. + for round_num in last_kept_round..10 { + let topic = communication::round_topic::(round_num, 1); + assert!(!is_expired(topic, &[1, 2, 3])); + } + } + } + + #[test] + fn message_from_unknown_authority_discarded() { + assert!(cost::UNKNOWN_VOTER != cost::BAD_SIGNATURE); + + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + let set_id = 1; + let auth = AuthorityId::unchecked_from([1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(1), |_, _| {}); + + let inner = val.inner.read(); + let unknown_voter = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage::
{ + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: UncheckedFrom::unchecked_from([1; 64]), + id: UncheckedFrom::unchecked_from([2u8; 32]), + }, + }, + ); + + let bad_sig = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage::
{ + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: UncheckedFrom::unchecked_from([1; 64]), + id: auth.clone(), + }, + }, + ); + + assert_eq!(unknown_voter, Action::Discard(cost::UNKNOWN_VOTER)); + assert_eq!(bad_sig, Action::Discard(cost::BAD_SIGNATURE)); + } + + #[test] + fn unsolicited_catch_up_messages_discarded() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + let set_id = 1; + let auth = AuthorityId::unchecked_from([1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(1), |_, _| {}); + + let validate_catch_up = || { + let mut inner = val.inner.write(); + inner.validate_catch_up_message( + &peer, + &FullCatchUpMessage { + set_id: SetId(set_id), + message: finality_grandpa::CatchUp { + round_number: 10, + prevotes: Default::default(), + precommits: Default::default(), + base_hash: Default::default(), + base_number: Default::default(), + }, + }, + ) + }; + + // the catch up is discarded because we have no pending request + assert_eq!(validate_catch_up(), Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)); + + let noted = val.inner.write().note_catch_up_request( + &peer, + &CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, + ); + + assert!(noted.0); + + // catch up is allowed because we have requested it, but it's rejected + // because it's malformed (empty prevotes and precommits) + assert_eq!(validate_catch_up(), Action::Discard(cost::MALFORMED_CATCH_UP)); + } + + #[test] + fn unanswerable_catch_up_requests_discarded() { + // create voter set state with round 2 completed + let set_state: SharedVoterSetState = { + let mut completed_rounds = voter_set_state().read().completed_rounds(); + + completed_rounds.push(environment::CompletedRound { + number: 2, + state: finality_grandpa::round::State::genesis(Default::default()), + base: Default::default(), + votes: Default::default(), + }); + + let mut current_rounds = environment::CurrentRounds::::new(); + current_rounds.insert(3, environment::HasVoted::No); + + let set_state = + environment::VoterSetState::::Live { completed_rounds, current_rounds }; + + set_state.into() + }; + + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); + + let set_id = 1; + let auth = AuthorityId::unchecked_from([1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(3), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded + let mut inner = val.inner.write(); + inner.peers.new_peer(peer, ObservedRole::Authority); + + let res = inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, + &set_state, + ); + + // we're at round 3, a catch up request for round 10 is out of scope + assert!(res.0.is_none()); + assert_eq!(res.1, Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)); + + let res = inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(2) }, + &set_state, + ); + + // a catch up request for round 2 should be answered successfully + match res.0.unwrap() { + GossipMessage::CatchUp(catch_up) => { + assert_eq!(catch_up.set_id, SetId(set_id)); + assert_eq!(catch_up.message.round_number, 2); + + assert_eq!(res.1, Action::Discard(cost::CATCH_UP_REPLY)); + }, + _ => panic!("expected catch up message"), + }; + } 
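// The `unanswerable_catch_up_requests_discarded` test above pins down when a
// catch-up request can be answered: it must target our current set, come from
// a peer that is behind the round it asks about, and not exceed our last
// completed round. A condensed, stand-alone restatement of that rule
// (simplified types, illustration only):
fn can_answer_catch_up_sketch(
    local_set_id: u64,
    last_completed_round: u64,
    request_set_id: u64,
    request_round: u64,
    peer_round: u64,
) -> bool {
    // same set as ours,
    request_set_id == local_set_id
        // the requesting peer must actually be behind the requested round,
        && peer_round < request_round
        // and we can only reply with rounds we have already completed.
        && request_round <= last_completed_round
}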
+ + #[test] + fn detects_honest_out_of_scope_catch_requests() { + let set_state = voter_set_state(); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); + + // the validator starts at set id 2 + val.note_set(SetId(2), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded + let peer = PeerId::random(); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + + let send_request = |set_id, round| { + let mut inner = val.inner.write(); + inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(round) }, + &set_state, + ) + }; + + let assert_res = |res: (Option<_>, Action<_>), honest| { + assert!(res.0.is_none()); + assert_eq!( + res.1, + if honest { + Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP) + } else { + Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + }, + ); + }; + + // the validator is at set id 2 and round 0. requests for set id 1 + // should not be answered but they should be considered an honest + // mistake + assert_res(send_request(1, 1), true); + + assert_res(send_request(1, 10), true); + + // requests for set id 0 should be considered out of scope + assert_res(send_request(0, 1), false); + + assert_res(send_request(0, 10), false); + + // after the validator progresses further than CATCH_UP_THRESHOLD in set + // id 2, any request for set id 1 should no longer be considered an + // honest mistake. + val.note_round(Round(3), |_, _| {}); + + assert_res(send_request(1, 1), false); + + assert_res(send_request(1, 2), false); + } + + #[test] + fn issues_catch_up_request_on_neighbor_packet_import() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. + let peer = PeerId::random(); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + + let import_neighbor_message = |set_id, round| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 42, + }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request for the previous round. + match import_neighbor_message(1, 42) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + }, + _ => panic!("expected catch up message"), + } + + // we note that we're at round 41. + val.note_round(Round(41), |_, _| {}); + + // if we import a neighbor message within CATCH_UP_THRESHOLD then we + // won't request a catch up. + match import_neighbor_message(1, 42) { + None => {}, + _ => panic!("expected no catch up message"), + } + + // or if the peer is on a lower round. + match import_neighbor_message(1, 40) { + None => {}, + _ => panic!("expected no catch up message"), + } + + // we also don't request a catch up if the peer is in a different set. + match import_neighbor_message(2, 42) { + None => {}, + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_when_disabled() { + // we create a gossip validator with catch up requests disabled. 
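// For the neighbor-packet tests in this area: a catch-up request is only
// issued when the peer is in our set and ahead of our round by more than the
// catch-up threshold, and it always targets the peer's previous round. A
// condensed restatement; the threshold value is an assumption standing in
// for the module's `CATCH_UP_THRESHOLD`.
fn catch_up_request_round_sketch(
    same_set: bool,
    local_round: u64,
    peer_round: u64,
) -> Option<u64> {
    const CATCH_UP_THRESHOLD: u64 = 2; // assumed value, for illustration
    if same_set && peer_round.saturating_sub(CATCH_UP_THRESHOLD) > local_round {
        // ask for the peer's previous (already completed) round.
        Some(peer_round - 1)
    } else {
        None
    }
}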
+ let config = { + let mut c = config(); + + // if the observer protocol is enabled and we are not an authority, + // then we don't issue any catch-up requests. + c.local_role = Role::Full; + c.observer_enabled = true; + + c + }; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. + let peer = PeerId::random(); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since they're disabled + // we should get `None`. + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, + ); + + match catch_up_request { + None => {}, + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_to_non_authorities_when_observer_enabled() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peers making the requests to the validator, + // otherwise it is discarded. + let peer_authority = PeerId::random(); + let peer_full = PeerId::random(); + + val.inner.write().peers.new_peer(peer_authority, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); + + let import_neighbor_message = |peer| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since the node is not an + // authority we should get `None`. + if import_neighbor_message(peer_full).is_some() { + panic!("expected no catch up message"); + } + + // importing the same neighbor message from a peer who is an authority + // should lead to a catch up request. + match import_neighbor_message(peer_authority) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + }, + _ => panic!("expected catch up message"), + } + } + + #[test] + fn sends_catch_up_requests_to_non_authorities_when_observer_disabled() { + let config = { + let mut c = config(); + + // if the observer protocol is disable any full-node should be able + // to answer catch-up requests. + c.observer_enabled = false; + + c + }; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the requests to the validator, otherwise it is + // discarded. 
+ let peer_full = PeerId::random(); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); + + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer_full, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, + ); + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request, the node is not an + // authority, but since the observer protocol is disabled we should + // issue a catch-up request to it anyway. + match catch_up_request { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + }, + _ => panic!("expected catch up message"), + } + } + + #[test] + fn doesnt_expire_next_round_messages() { + // NOTE: this is a regression test + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // we are at round 10 + val.note_round(Round(9), |_, _| {}); + val.note_round(Round(10), |_, _| {}); + + let mut is_expired = val.message_expired(); + + // we accept messages from rounds 9, 10 and 11 + // therefore neither of those should be considered expired + for round in &[9, 10, 11] { + assert!(!is_expired(communication::round_topic::(*round, 1), &[])) + } + } + + #[test] + fn progressively_gossips_to_more_peers_as_round_duration_increases() { + let mut config = config(); + config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race + let round_duration = config.gossip_duration * ROUND_DURATION; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); + + // the validator start at set id 0 + val.note_set(SetId(0), Vec::new(), |_, _| {}); + + // add 60 peers, 30 authorities and 30 full nodes + let mut authorities = Vec::new(); + authorities.resize_with(30, || PeerId::random()); + + let mut full_nodes = Vec::new(); + full_nodes.resize_with(30, || PeerId::random()); + + for i in 0..30 { + val.inner.write().peers.new_peer(authorities[i], ObservedRole::Authority); + + val.inner.write().peers.new_peer(full_nodes[i], ObservedRole::Full); + } + + let test = |rounds_elapsed, peers| { + // rewind n round durations + val.inner.write().local_view.as_mut().unwrap().round_start = Instant::now() - + Duration::from_millis( + (round_duration.as_millis() as f32 * rounds_elapsed) as u64, + ); + + val.inner.write().peers.reshuffle(); + + let mut message_allowed = val.message_allowed(); + + move || { + let mut allowed = 0; + for peer in peers { + if message_allowed( + peer, + MessageIntent::Broadcast, + &communication::round_topic::(1, 0), + &[], + ) { + allowed += 1; + } + } + allowed + } + }; + + fn trial usize>(mut test: F) -> usize { + let mut results = Vec::new(); + let n = 1000; + + for _ in 0..n { + results.push(test()); + } + + let n = results.len(); + let sum: usize = results.iter().sum(); + + sum / n + } + + let all_peers = authorities.iter().chain(full_nodes.iter()).cloned().collect(); + + // on the first attempt we will only gossip to 4 peers, either + // authorities or full nodes, but we'll guarantee that half of those + // are authorities + assert!(trial(test(1.0, &authorities)) >= LUCKY_PEERS / 2); + assert_eq!(trial(test(1.0, &all_peers)), LUCKY_PEERS); + + // after more than 1.5 round durations have elapsed we should gossip to + // `sqrt(peers)` we're connected to, but we guarantee that at least 4 of + // those peers are 
authorities (plus the `LUCKY_PEERS` from the previous + // stage) + assert!(trial(test(PROPAGATION_SOME * 1.1, &authorities)) >= LUCKY_PEERS); + assert_eq!( + trial(test(2.0, &all_peers)), + LUCKY_PEERS + (all_peers.len() as f64).sqrt() as usize, + ); + + // after 3 rounds durations we should gossip to all peers we are + // connected to + assert_eq!(trial(test(PROPAGATION_ALL * 1.1, &all_peers)), all_peers.len()); + } + + #[test] + fn never_gossips_round_messages_to_light_clients() { + let config = config(); + let round_duration = config.gossip_duration * ROUND_DURATION; + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); + + // the validator starts at set id 0 + val.note_set(SetId(0), Vec::new(), |_, _| {}); + + // add a new light client as peer + let light_peer = PeerId::random(); + + val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); + + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &communication::round_topic::(1, 0), + &[], + )); + + // we reverse the round start time so that the elapsed time is higher + // (which should lead to more peers getting the message) + val.inner.write().local_view.as_mut().unwrap().round_start = + Instant::now() - round_duration * 10; + + // even after the round has been going for 10 round durations we will never + // gossip to light clients + assert!(!val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &communication::round_topic::(1, 0), + &[], + )); + + // update the peer state and local state wrt commits + val.inner + .write() + .peers + .update_peer_state( + &light_peer, + NeighborPacket { round: Round(1), set_id: SetId(0), commit_finalized_height: 1 }, + ) + .unwrap(); + + val.note_commit_finalized(Round(1), SetId(0), 2, |_, _| {}); + + let commit = { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number: 2, + precommits: Vec::new(), + auth_data: Vec::new(), + }; + + communication::gossip::GossipMessage::::Commit( + communication::gossip::FullCommitMessage { + round: Round(2), + set_id: SetId(0), + message: commit, + }, + ) + .encode() + }; + + // global messages are gossiped to light clients though + assert!(val.message_allowed()( + &light_peer, + MessageIntent::Broadcast, + &communication::global_topic::(0), + &commit, + )); + } + + #[test] + fn only_gossip_commits_to_peers_on_same_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // the validator starts at set id 1 + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add a new peer at set id 1 + let peer1 = PeerId::random(); + + val.inner.write().peers.new_peer(peer1, ObservedRole::Authority); + + val.inner + .write() + .peers + .update_peer_state( + &peer1, + NeighborPacket { round: Round(1), set_id: SetId(1), commit_finalized_height: 1 }, + ) + .unwrap(); + + // peer2 will default to set id 0 + let peer2 = PeerId::random(); + val.inner.write().peers.new_peer(peer2, ObservedRole::Authority); + + // create a commit for round 1 of set id 1 + // targeting a block at height 2 + let commit = { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number: 2, + precommits: Vec::new(), + auth_data: Vec::new(), + }; + + communication::gossip::GossipMessage::::Commit( + communication::gossip::FullCommitMessage { + round: Round(1), + set_id: SetId(1), + message: commit, + }, + ) + .encode() + }; + + // note the commit in the validator + val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); + 
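// The assertions that follow check the relay rule for commits: a commit is
// only gossiped to a peer when the peer's view would accept it (same set id,
// targeting a block above the peer's last known finalized height) and when it
// matches our own best commit. A condensed restatement with plain integers
// (illustration only):
fn commit_allowed_for_peer_sketch(
    peer_set_id: u64,
    peer_last_commit_height: u64,
    our_best_commit_height: u64,
    msg_set_id: u64,
    msg_target_height: u64,
) -> bool {
    let peer_accepts =
        msg_set_id == peer_set_id && msg_target_height > peer_last_commit_height;
    peer_accepts && msg_target_height == our_best_commit_height
}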
+ let mut message_allowed = val.message_allowed(); + + // the commit should be allowed to peer 1 + assert!(message_allowed( + &peer1, + MessageIntent::Broadcast, + &communication::global_topic::(1), + &commit, + )); + + // but disallowed to peer 2 since the peer is on set id 0 + // the commit should be allowed to peer 1 + assert!(!message_allowed( + &peer2, + MessageIntent::Broadcast, + &communication::global_topic::(1), + &commit, + )); + } + + #[test] + fn expire_commits_from_older_rounds() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + let commit = |round, set_id, target_number| { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number, + precommits: Vec::new(), + auth_data: Vec::new(), + }; + + communication::gossip::GossipMessage::::Commit( + communication::gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + message: commit, + }, + ) + .encode() + }; + + // note the beginning of a new set with id 1 + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // note a commit for round 1 in the validator + // finalizing a block at height 2 + val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); + + let mut message_expired = val.message_expired(); + + // a commit message for round 1 that finalizes the same height as we + // have observed previously should not be expired + assert!(!message_expired(communication::global_topic::(1), &commit(1, 1, 2),)); + + // it should be expired if it is for a lower block + assert!(message_expired(communication::global_topic::(1), &commit(1, 1, 1))); + + // or the same block height but from the previous round + assert!(message_expired(communication::global_topic::(1), &commit(0, 1, 2))); + } + + #[test] + fn allow_noting_different_authorities_for_same_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + let a1 = vec![UncheckedFrom::unchecked_from([0; 32])]; + val.note_set(SetId(1), a1.clone(), |_, _| {}); + + assert_eq!(val.inner().read().authorities, a1); + + let a2 = + vec![UncheckedFrom::unchecked_from([1; 32]), UncheckedFrom::unchecked_from([2; 32])]; + val.note_set(SetId(1), a2.clone(), |_, _| {}); + + assert_eq!(val.inner().read().authorities, a2); + } + + #[test] + fn sends_neighbor_packets_to_non_light_peers_when_starting_a_new_round() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // initialize the validator to a stable set id + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + let authority_peer = PeerId::random(); + let full_peer = PeerId::random(); + let light_peer = PeerId::random(); + + val.inner.write().peers.new_peer(authority_peer, ObservedRole::Authority); + val.inner.write().peers.new_peer(full_peer, ObservedRole::Full); + val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); + + val.note_round(Round(2), |peers, message| { + assert_eq!(peers.len(), 2); + assert!(peers.contains(&authority_peer)); + assert!(peers.contains(&full_peer)); + assert!(!peers.contains(&light_peer)); + assert!(matches!(message, NeighborPacket { set_id: SetId(1), round: Round(2), .. 
})); + }); + } + + #[test] + fn sends_neighbor_packets_to_all_peers_when_starting_a_new_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); + + // initialize the validator to a stable set id + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + let authority_peer = PeerId::random(); + let full_peer = PeerId::random(); + let light_peer = PeerId::random(); + + val.inner.write().peers.new_peer(authority_peer, ObservedRole::Authority); + val.inner.write().peers.new_peer(full_peer, ObservedRole::Full); + val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); + + val.note_set(SetId(2), Vec::new(), |peers, message| { + assert_eq!(peers.len(), 3); + assert!(peers.contains(&authority_peer)); + assert!(peers.contains(&full_peer)); + assert!(peers.contains(&light_peer)); + assert!(matches!(message, NeighborPacket { set_id: SetId(2), round: Round(1), .. })); + }); + } +} diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs new file mode 100644 index 00000000..9d90035d --- /dev/null +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -0,0 +1,1104 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Communication streams for the polite-grandpa networking protocol. +//! +//! GRANDPA nodes communicate over a gossip network, where messages are not sent to +//! peers until they have reached a given round. +//! +//! Rather than expressing protocol rules, +//! polite-grandpa just carries a notion of impoliteness. Nodes which pass some arbitrary +//! threshold of impoliteness are removed. Messages are either costly, or beneficial. +//! +//! For instance, it is _impolite_ to send the same message more than once. +//! In the future, there will be a fallback for allowing sending the same message +//! under certain conditions that are used to un-stick the protocol. 
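// The "impoliteness" described above is tracked as signed reputation deltas:
// every validated message yields a `ReputationChange` (a cost or a benefit)
// and the networking layer acts on the accumulated value. A toy accumulator
// illustrating the idea; the threshold handling and bookkeeping here are
// assumptions, not the peer-set's actual policy.
struct PeerReputationSketch {
    value: i64,
}

impl PeerReputationSketch {
    fn apply(&mut self, change: i32) {
        self.value = self.value.saturating_add(i64::from(change));
    }

    /// Whether the peer has crossed the (assumed) impoliteness threshold and
    /// should be disconnected.
    fn should_disconnect(&self, threshold: i64) -> bool {
        self.value <= threshold
    }
}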
+ +use futures::{channel::mpsc, prelude::*}; +use log::{debug, trace}; +use parking_lot::Mutex; +use prometheus_endpoint::Registry; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +use finality_grandpa::{ + voter, + voter_set::VoterSet, + Message::{Precommit, Prevote, PrimaryPropose}, +}; +use parity_scale_codec::{Decode, Encode}; +use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange}; +use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; + +use crate::{ + environment::HasVoted, CatchUp, Commit, CommunicationIn, CommunicationOutH, CompactCommit, + Error, Message, SignedMessage, LOG_TARGET, +}; +use gossip::{ + FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, +}; +use sc_network_common::sync::SyncEventStream; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; + +pub mod gossip; +mod periodic; + +#[cfg(test)] +pub(crate) mod tests; + +// How often to rebroadcast neighbor packets, in cases where no new packets are created. +pub(crate) const NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(2 * 60); + +pub mod grandpa_protocol_name { + use sc_chain_spec::ChainSpec; + use sc_network::types::ProtocolName; + + pub(crate) const NAME: &str = "/grandpa/1"; + /// Old names for the notifications protocol, used for backward compatibility. + pub(crate) const LEGACY_NAMES: [&str; 1] = ["/paritytech/grandpa/1"]; + + /// Name of the notifications protocol used by GRANDPA. + /// + /// Must be registered towards the networking in order for GRANDPA to properly function. + pub fn standard_name>( + genesis_hash: &Hash, + chain_spec: &Box, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + let chain_prefix = match chain_spec.fork_id() { + Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), + None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), + }; + format!("{}{}", chain_prefix, NAME).into() + } +} + +// cost scalars for reporting peers. 
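// `grandpa_protocol_name::standard_name` above builds the on-the-wire
// protocol name from the hex-encoded genesis hash, an optional fork id and
// the "/grandpa/1" suffix. A stand-alone illustration of the same string
// construction (hex encoding is done manually here instead of via
// `array_bytes`; illustration only):
fn grandpa_protocol_name_sketch(genesis_hash: &[u8], fork_id: Option<&str>) -> String {
    let genesis_hex: String = genesis_hash.iter().map(|b| format!("{:02x}", b)).collect();
    match fork_id {
        Some(fork_id) => format!("/{}/{}/grandpa/1", genesis_hex, fork_id),
        None => format!("/{}/grandpa/1", genesis_hex),
    }
}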
+mod cost { + use sc_network::ReputationChange as Rep; + pub(super) const PAST_REJECTION: Rep = Rep::new(-50, "Grandpa: Past message"); + pub(super) const BAD_SIGNATURE: Rep = Rep::new(-100, "Grandpa: Bad signature"); + pub(super) const MALFORMED_CATCH_UP: Rep = Rep::new(-1000, "Grandpa: Malformed cath-up"); + pub(super) const MALFORMED_COMMIT: Rep = Rep::new(-1000, "Grandpa: Malformed commit"); + pub(super) const FUTURE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Future message"); + pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); + + pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); + pub(super) const DUPLICATE_NEIGHBOR_MESSAGE: Rep = + Rep::new(-500, "Grandpa: Duplicate neighbor message without grace period"); + pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; + pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; + pub(super) const PER_BLOCK_LOADED: i32 = -10; + pub(super) const INVALID_CATCH_UP: Rep = Rep::new(-5000, "Grandpa: Invalid catch-up"); + pub(super) const INVALID_COMMIT: Rep = Rep::new(-5000, "Grandpa: Invalid commit"); + pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Out-of-scope message"); + pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = + Rep::new(-200, "Grandpa: Catch-up request timeout"); + + // cost of answering a catch up request + pub(super) const CATCH_UP_REPLY: Rep = Rep::new(-200, "Grandpa: Catch-up reply"); + pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = + Rep::new(-200, "Grandpa: Out-of-scope catch-up"); +} + +// benefit scalars for reporting peers. +mod benefit { + use sc_network::ReputationChange as Rep; + pub(super) const NEIGHBOR_MESSAGE: Rep = Rep::new(100, "Grandpa: Neighbor message"); + pub(super) const ROUND_MESSAGE: Rep = Rep::new(100, "Grandpa: Round message"); + pub(super) const BASIC_VALIDATED_CATCH_UP: Rep = Rep::new(200, "Grandpa: Catch-up message"); + pub(super) const BASIC_VALIDATED_COMMIT: Rep = Rep::new(100, "Grandpa: Commit"); + pub(super) const PER_EQUIVOCATION: i32 = 10; +} + +/// A type that ties together our local authority id and a keystore where it is +/// available for signing. +pub struct LocalIdKeystore((AuthorityId, KeystorePtr)); + +impl LocalIdKeystore { + /// Returns a reference to our local authority id. + fn local_id(&self) -> &AuthorityId { + &(self.0).0 + } + + /// Returns a reference to the keystore. + fn keystore(&self) -> KeystorePtr { + (self.0).1.clone() + } +} + +impl From<(AuthorityId, KeystorePtr)> for LocalIdKeystore { + fn from(inner: (AuthorityId, KeystorePtr)) -> LocalIdKeystore { + LocalIdKeystore(inner) + } +} + +/// If the voter set is larger than this value some telemetry events are not +/// sent to avoid increasing usage resource on the node and flooding the +/// telemetry server (e.g. received votes, received commits.) +const TELEMETRY_VOTERS_LIMIT: usize = 10; + +/// A handle to the network. +/// +/// Something that provides the capabilities needed for the `gossip_network::Network` trait. +pub trait Network: GossipNetwork + Clone + Send + 'static {} + +impl Network for T +where + Block: BlockT, + T: GossipNetwork + Clone + Send + 'static, +{ +} + +/// A handle to syncing-related services. +/// +/// Something that provides the ability to set a fork sync request for a particular block. 
+pub trait Syncing: + NetworkSyncForkRequest> + + NetworkBlock> + + SyncEventStream + + Clone + + Send + + 'static +{ +} + +impl Syncing for T +where + Block: BlockT, + T: NetworkSyncForkRequest> + + NetworkBlock> + + SyncEventStream + + Clone + + Send + + 'static, +{ +} + +/// Create a unique topic for a round and set-id combo. +pub(crate) fn round_topic(round: RoundNumber, set_id: SetIdNumber) -> B::Hash { + <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) +} + +/// Create a unique topic for global messages on a set ID. +pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { + <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) +} + +/// Bridge between the underlying network service, gossiping consensus messages and Grandpa +pub(crate) struct NetworkBridge, S: Syncing> { + service: N, + sync: S, + gossip_engine: Arc>>, + validator: Arc>, + + /// Sender side of the neighbor packet channel. + /// + /// Packets sent into this channel are processed by the `NeighborPacketWorker` and passed on to + /// the underlying `GossipEngine`. + neighbor_sender: periodic::NeighborPacketSender, + + /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // children, thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. + neighbor_packet_worker: Arc>>, + + /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the + /// gossip engine. + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its + // children, thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given + // that it is just an `UnboundedReceiver`, one could also switch to a + // multi-producer-*multi*-consumer channel implementation. + gossip_validator_report_stream: Arc>>, + + telemetry: Option, +} + +impl, S: Syncing> Unpin for NetworkBridge {} + +impl, S: Syncing> NetworkBridge { + /// Create a new NetworkBridge to the given NetworkService. Returns the service + /// handle. + /// On creation it will register previous rounds' votes with the gossip + /// service taken from the VoterSetState. + pub(crate) fn new( + service: N, + sync: S, + config: crate::Config, + set_state: crate::environment::SharedVoterSetState, + prometheus_registry: Option<&Registry>, + telemetry: Option, + ) -> Self { + let protocol = config.protocol_name.clone(); + let (validator, report_stream) = + GossipValidator::new(config, set_state.clone(), prometheus_registry, telemetry.clone()); + + let validator = Arc::new(validator); + let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( + service.clone(), + sync.clone(), + protocol, + validator.clone(), + prometheus_registry, + ))); + + { + // register all previous votes with the gossip service so that they're + // available to peers potentially stuck on a previous round. + let completed = set_state.read().completed_rounds(); + let (set_id, voters) = completed.set_info(); + validator.note_set(SetId(set_id), voters.to_vec(), |_, _| {}); + for round in completed.iter() { + let topic = round_topic::(round.number, set_id); + + // we need to note the round with the gossip validator otherwise + // messages will be ignored. 
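// Votes from completed rounds are registered with the gossip engine under the
// round's topic so that peers stuck on an earlier round can still retrieve
// them. The topics themselves are just the block's hasher applied to
// "{set_id}-{round}" (or "{set_id}-GLOBAL" for commits), as in `round_topic`
// and `global_topic` above. A sketch of that derivation, assuming the common
// `BlakeTwo256` hasher rather than being generic over the block type:
use sp_runtime::traits::{BlakeTwo256, Hash as HashT};

fn round_topic_sketch(round: u64, set_id: u64) -> sp_core::H256 {
    BlakeTwo256::hash(format!("{}-{}", set_id, round).as_bytes())
}

fn global_topic_sketch(set_id: u64) -> sp_core::H256 {
    BlakeTwo256::hash(format!("{}-GLOBAL", set_id).as_bytes())
}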
+ validator.note_round(Round(round.number), |_, _| {}); + + for signed in round.votes.iter() { + let message = gossip::GossipMessage::Vote(gossip::VoteMessage:: { + message: signed.clone(), + round: Round(round.number), + set_id: SetId(set_id), + }); + + gossip_engine.lock().register_gossip_message(topic, message.encode()); + } + + trace!( + target: LOG_TARGET, + "Registered {} messages for topic {:?} (round: {}, set_id: {})", + round.votes.len(), + topic, + round.number, + set_id, + ); + } + } + + let (neighbor_packet_worker, neighbor_packet_sender) = + periodic::NeighborPacketWorker::new(NEIGHBOR_REBROADCAST_PERIOD); + + NetworkBridge { + service, + sync, + gossip_engine, + validator, + neighbor_sender: neighbor_packet_sender, + neighbor_packet_worker: Arc::new(Mutex::new(neighbor_packet_worker)), + gossip_validator_report_stream: Arc::new(Mutex::new(report_stream)), + telemetry, + } + } + + /// Note the beginning of a new round to the `GossipValidator`. + pub(crate) fn note_round(&self, round: Round, set_id: SetId, voters: &VoterSet) { + // is a no-op if currently in that set. + self.validator.note_set( + set_id, + voters.iter().map(|(v, _)| v.clone()).collect(), + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + + self.validator + .note_round(round, |to, neighbor| self.neighbor_sender.send(to, neighbor)); + } + + /// Get a stream of signature-checked round messages from the network as well as a sink for + /// round messages to the network all within the current set. + pub(crate) fn round_communication( + &self, + keystore: Option, + round: Round, + set_id: SetId, + voters: Arc>, + has_voted: HasVoted, + ) -> (impl Stream> + Unpin, OutgoingMessages) { + self.note_round(round, set_id, &voters); + + let keystore = keystore.and_then(|ks| { + let id = ks.local_id(); + if voters.contains(id) { + Some(ks) + } else { + None + } + }); + + let topic = round_topic::(round.0, set_id.0); + let telemetry = self.telemetry.clone(); + let incoming = + self.gossip_engine.lock().messages_for(topic).filter_map(move |notification| { + let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); + + match decoded { + Err(ref e) => { + debug!( + target: LOG_TARGET, + "Skipping malformed message {:?}: {}", notification, e + ); + future::ready(None) + }, + Ok(GossipMessage::Vote(msg)) => { + // check signature. 
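// NOTE: clarification added here, not in the original sources. Messages
// yielded by `messages_for(topic)` have already been accepted by the gossip
// validator, so votes reaching this point are the "signature-checked round
// messages" promised by `round_communication` above; the guard below only
// drops votes whose author is not in the current voter set.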
+ if !voters.contains(&msg.message.id) { + debug!( + target: LOG_TARGET, + "Skipping message from unknown voter {}", msg.message.id + ); + return future::ready(None) + } + + if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { + match &msg.message.message { + PrimaryPropose(propose) => { + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_propose"; + "voter" => ?format!("{}", msg.message.id), + "target_number" => ?propose.target_number, + "target_hash" => ?propose.target_hash, + ); + }, + Prevote(prevote) => { + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_prevote"; + "voter" => ?format!("{}", msg.message.id), + "target_number" => ?prevote.target_number, + "target_hash" => ?prevote.target_hash, + ); + }, + Precommit(precommit) => { + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_precommit"; + "voter" => ?format!("{}", msg.message.id), + "target_number" => ?precommit.target_number, + "target_hash" => ?precommit.target_hash, + ); + }, + }; + } + + future::ready(Some(msg.message)) + }, + _ => { + debug!(target: LOG_TARGET, "Skipping unknown message type"); + future::ready(None) + }, + } + }); + + let (tx, out_rx) = mpsc::channel(0); + let outgoing = OutgoingMessages:: { + keystore, + round: round.0, + set_id: set_id.0, + network: self.gossip_engine.clone(), + sender: tx, + has_voted, + telemetry: self.telemetry.clone(), + }; + + // Combine incoming votes from external GRANDPA nodes with outgoing + // votes from our own GRANDPA voter to have a single + // vote-import-pipeline. + let incoming = stream::select(incoming, out_rx); + + (incoming, outgoing) + } + + /// Set up the global communication streams. + pub(crate) fn global_communication( + &self, + set_id: SetId, + voters: Arc>, + is_voter: bool, + ) -> ( + impl Stream>, + impl Sink, Error = Error> + Unpin, + ) { + self.validator.note_set( + set_id, + voters.iter().map(|(v, _)| v.clone()).collect(), + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + + let topic = global_topic::(set_id.0); + let incoming = incoming_global( + self.gossip_engine.clone(), + topic, + voters, + self.validator.clone(), + self.neighbor_sender.clone(), + self.telemetry.clone(), + ); + + let outgoing = CommitsOut::::new( + self.gossip_engine.clone(), + set_id.0, + is_voter, + self.validator.clone(), + self.neighbor_sender.clone(), + self.telemetry.clone(), + ); + + let outgoing = outgoing.with(|out| { + let voter::CommunicationOut::Commit(round, commit) = out; + future::ok((round, commit)) + }); + + (incoming, outgoing) + } + + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). 
+ pub(crate) fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { + self.sync.set_sync_fork_request(peers, hash, number) + } +} + +impl, S: Syncing> Future for NetworkBridge { + type Output = Result<(), Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + loop { + match self.neighbor_packet_worker.lock().poll_next_unpin(cx) { + Poll::Ready(Some((to, packet))) => { + self.gossip_engine.lock().send_message(to, packet.encode()); + }, + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Neighbor packet worker stream closed.".into(), + ))), + Poll::Pending => break, + } + } + + loop { + match self.gossip_validator_report_stream.lock().poll_next_unpin(cx) { + Poll::Ready(Some(PeerReport { who, cost_benefit })) => { + self.gossip_engine.lock().report(who, cost_benefit); + }, + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Gossip validator report stream closed.".into(), + ))), + Poll::Pending => break, + } + } + + match self.gossip_engine.lock().poll_unpin(cx) { + Poll::Ready(()) => + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))), + Poll::Pending => {}, + } + + Poll::Pending + } +} + +fn incoming_global( + gossip_engine: Arc>>, + topic: B::Hash, + voters: Arc>, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, +) -> impl Stream> { + let process_commit = { + let telemetry = telemetry.clone(); + move |msg: FullCommitMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { + if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { + let precommits_signed_by: Vec = + msg.message.auth_data.iter().map(move |(_, a)| format!("{}", a)).collect(); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.received_commit"; + "contains_precommits_signed_by" => ?precommits_signed_by, + "target_number" => ?msg.message.target_number.clone(), + "target_hash" => ?msg.message.target_hash.clone(), + ); + } + + if let Err(cost) = check_compact_commit::( + &msg.message, + voters, + msg.round, + msg.set_id, + telemetry.as_ref(), + ) { + if let Some(who) = notification.sender { + gossip_engine.lock().report(who, cost); + } + + return None + } + + let round = msg.round; + let set_id = msg.set_id; + let commit = msg.message; + let finalized_number = commit.target_number; + let gossip_validator = gossip_validator.clone(); + let gossip_engine = gossip_engine.clone(); + let neighbor_sender = neighbor_sender.clone(); + let cb = move |outcome| match outcome { + voter::CommitProcessingOutcome::Good(_) => { + // if it checks out, gossip it. not accounting for + // any discrepancy between the actual ghost and the claimed + // finalized number. + gossip_validator.note_commit_finalized( + round, + set_id, + finalized_number, + |to, neighbor| neighbor_sender.send(to, neighbor), + ); + + gossip_engine.lock().gossip_message(topic, notification.message.clone(), false); + }, + voter::CommitProcessingOutcome::Bad(_) => { + // report peer and do not gossip. 
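// NOTE: added as an aside, not in the original sources. A `Bad` processing
// outcome penalises the sending peer with `cost::INVALID_COMMIT` (-5000
// reputation, see the `cost` module above), whereas the `Good` arm records the
// finalized height with the gossip validator and re-gossips the commit.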
+ if let Some(who) = notification.sender.take() { + gossip_engine.lock().report(who, cost::INVALID_COMMIT); + } + }, + }; + + let cb = voter::Callback::Work(Box::new(cb)); + + Some(voter::CommunicationIn::Commit(round.0, commit, cb)) + } + }; + + let process_catch_up = move |msg: FullCatchUpMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { + let gossip_validator = gossip_validator.clone(); + let gossip_engine = gossip_engine.clone(); + + if let Err(cost) = check_catch_up::(&msg.message, voters, msg.set_id, telemetry.clone()) + { + if let Some(who) = notification.sender { + gossip_engine.lock().report(who, cost); + } + + return None + } + + let cb = move |outcome| { + if let voter::CatchUpProcessingOutcome::Bad(_) = outcome { + // report peer + if let Some(who) = notification.sender.take() { + gossip_engine.lock().report(who, cost::INVALID_CATCH_UP); + } + } + + gossip_validator.note_catch_up_message_processed(); + }; + + let cb = voter::Callback::Work(Box::new(cb)); + + Some(voter::CommunicationIn::CatchUp(msg.message, cb)) + }; + + gossip_engine + .clone() + .lock() + .messages_for(topic) + .filter_map(|notification| { + // this could be optimized by decoding piecewise. + let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); + if let Err(ref e) = decoded { + trace!( + target: LOG_TARGET, + "Skipping malformed commit message {:?}: {}", + notification, + e + ); + } + future::ready(decoded.map(move |d| (notification, d)).ok()) + }) + .filter_map(move |(notification, msg)| { + future::ready(match msg { + GossipMessage::Commit(msg) => + process_commit(msg, notification, &gossip_engine, &gossip_validator, &voters), + GossipMessage::CatchUp(msg) => + process_catch_up(msg, notification, &gossip_engine, &gossip_validator, &voters), + _ => { + debug!(target: LOG_TARGET, "Skipping unknown message type"); + None + }, + }) + }) +} + +impl, S: Syncing> Clone for NetworkBridge { + fn clone(&self) -> Self { + NetworkBridge { + service: self.service.clone(), + sync: self.sync.clone(), + gossip_engine: self.gossip_engine.clone(), + validator: Arc::clone(&self.validator), + neighbor_sender: self.neighbor_sender.clone(), + neighbor_packet_worker: self.neighbor_packet_worker.clone(), + gossip_validator_report_stream: self.gossip_validator_report_stream.clone(), + telemetry: self.telemetry.clone(), + } + } +} + +/// Type-safe wrapper around a round number. +#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Encode, Decode)] +pub struct Round(pub RoundNumber); + +/// Type-safe wrapper around a set ID. +#[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord, Encode, Decode)] +pub struct SetId(pub SetIdNumber); + +/// A sink for outgoing messages to the network. Any messages that are sent will +/// be replaced, as appropriate, according to the given `HasVoted`. +/// NOTE: The votes are stored unsigned, which means that the signatures need to +/// be "stable", i.e. we should end up with the exact same signed message if we +/// use the same raw message and key to sign. This is currently true for +/// `ed25519` and `BLS` signatures (which we might use in the future), care must +/// be taken when switching to different key types. 
+pub(crate) struct OutgoingMessages { + round: RoundNumber, + set_id: SetIdNumber, + keystore: Option, + sender: mpsc::Sender>, + network: Arc>>, + has_voted: HasVoted, + telemetry: Option, +} + +impl Unpin for OutgoingMessages {} + +impl Sink> for OutgoingMessages { + type Error = Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Sink::poll_ready(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { + Error::Network(format!("Failed to poll_ready channel sender: {:?}", e)) + }) + }) + } + + fn start_send( + mut self: Pin<&mut Self>, + mut msg: Message, + ) -> Result<(), Self::Error> { + // if we've voted on this round previously under the same key, send that vote instead + match &mut msg { + finality_grandpa::Message::PrimaryPropose(ref mut vote) => { + if let Some(propose) = self.has_voted.propose() { + *vote = propose.clone(); + } + }, + finality_grandpa::Message::Prevote(ref mut vote) => { + if let Some(prevote) = self.has_voted.prevote() { + *vote = prevote.clone(); + } + }, + finality_grandpa::Message::Precommit(ref mut vote) => { + if let Some(precommit) = self.has_voted.precommit() { + *vote = precommit.clone(); + } + }, + } + + // when locals exist, sign messages on import + if let Some(ref keystore) = self.keystore { + let target_hash = *(msg.target().0); + let signed = sp_consensus_grandpa::sign_message( + keystore.keystore(), + msg, + keystore.local_id().clone(), + self.round, + self.set_id, + ) + .ok_or_else(|| { + Error::Signing(format!( + "Failed to sign GRANDPA vote for round {} targetting {:?}", + self.round, target_hash + )) + })?; + + let message = GossipMessage::Vote(VoteMessage:: { + message: signed.clone(), + round: Round(self.round), + set_id: SetId(self.set_id), + }); + + debug!( + target: LOG_TARGET, + "Announcing block {} to peers which we voted on in round {} in set {}", + target_hash, + self.round, + self.set_id, + ); + + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.announcing_blocks_to_voted_peers"; + "block" => ?target_hash, "round" => ?self.round, "set_id" => ?self.set_id, + ); + + // announce the block we voted on to our peers. + self.network.lock().announce(target_hash, None); + + // propagate the message to peers + let topic = round_topic::(self.round, self.set_id); + self.network.lock().gossip_message(topic, message.encode(), false); + + // forward the message to the inner sender. + return self.sender.start_send(signed).map_err(|e| { + Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) + }) + }; + + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Sink::poll_close(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { + Error::Network(format!("Failed to poll_close channel sender: {:?}", e)) + }) + }) + } +} + +// checks a compact commit. returns the cost associated with processing it if +// the commit was bad. +fn check_compact_commit( + msg: &CompactCommit, + voters: &VoterSet, + round: Round, + set_id: SetId, + telemetry: Option<&TelemetryHandle>, +) -> Result<(), ReputationChange> { + // 4f + 1 = equivocations from f voters. + let f = voters.total_weight() - voters.threshold(); + let full_threshold = (f + voters.total_weight()).0; + + // check total weight is not out of range. 
+ let mut total_weight = 0; + for (_, ref id) in &msg.auth_data { + if let Some(weight) = voters.get(id).map(|info| info.weight()) { + total_weight += weight.get(); + if total_weight > full_threshold { + return Err(cost::MALFORMED_COMMIT) + } + } else { + debug!(target: LOG_TARGET, "Skipping commit containing unknown voter {}", id); + return Err(cost::MALFORMED_COMMIT) + } + } + + if total_weight < voters.threshold().get() { + return Err(cost::MALFORMED_COMMIT) + } + + // check signatures on all contained precommits. + let mut buf = Vec::new(); + for (i, (precommit, (sig, id))) in msg.precommits.iter().zip(&msg.auth_data).enumerate() { + use crate::communication::gossip::Misbehavior; + use finality_grandpa::Message as GrandpaMessage; + + if !sp_consensus_grandpa::check_message_signature_with_buffer( + &GrandpaMessage::Precommit(precommit.clone()), + id, + sig, + round.0, + set_id.0, + &mut buf, + ) { + debug!(target: LOG_TARGET, "Bad commit message signature {}", id); + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.bad_commit_msg_signature"; + "id" => ?id, + ); + let cost = Misbehavior::BadCommitMessage { + signatures_checked: i as i32, + blocks_loaded: 0, + equivocations_caught: 0, + } + .cost(); + + return Err(cost) + } + } + + Ok(()) +} + +// checks a catch up. returns the cost associated with processing it if +// the catch up was bad. +fn check_catch_up( + msg: &CatchUp, + voters: &VoterSet, + set_id: SetId, + telemetry: Option, +) -> Result<(), ReputationChange> { + // 4f + 1 = equivocations from f voters. + let f = voters.total_weight() - voters.threshold(); + let full_threshold = (f + voters.total_weight()).0; + + // check total weight is not out of range for a set of votes. + fn check_weight<'a>( + voters: &'a VoterSet, + votes: impl Iterator, + full_threshold: u64, + ) -> Result<(), ReputationChange> { + let mut total_weight = 0; + + for id in votes { + if let Some(weight) = voters.get(id).map(|info| info.weight()) { + total_weight += weight.get(); + if total_weight > full_threshold { + return Err(cost::MALFORMED_CATCH_UP) + } + } else { + debug!( + target: LOG_TARGET, + "Skipping catch up message containing unknown voter {}", id + ); + return Err(cost::MALFORMED_CATCH_UP) + } + } + + if total_weight < voters.threshold().get() { + return Err(cost::MALFORMED_CATCH_UP) + } + + Ok(()) + } + + check_weight(voters, msg.prevotes.iter().map(|vote| &vote.id), full_threshold)?; + + check_weight(voters, msg.precommits.iter().map(|vote| &vote.id), full_threshold)?; + + fn check_signatures<'a, B, I>( + messages: I, + round: RoundNumber, + set_id: SetIdNumber, + mut signatures_checked: usize, + buf: &mut Vec, + telemetry: Option, + ) -> Result + where + B: BlockT, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + { + use crate::communication::gossip::Misbehavior; + + for (msg, id, sig) in messages { + signatures_checked += 1; + + if !sp_consensus_grandpa::check_message_signature_with_buffer( + &msg, id, sig, round, set_id, buf, + ) { + debug!(target: LOG_TARGET, "Bad catch up message signature {}", id); + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.bad_catch_up_msg_signature"; + "id" => ?id, + ); + + let cost = Misbehavior::BadCatchUpMessage { + signatures_checked: signatures_checked as i32, + } + .cost(); + + return Err(cost) + } + } + + Ok(signatures_checked) + } + + let mut buf = Vec::new(); + + // check signatures on all contained prevotes. 
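// NOTE: added as an aside, not in the original sources. The running
// `signatures_checked` count from the prevote pass is threaded into the
// precommit pass, so a bad signature is reported with a
// `Misbehavior::BadCatchUpMessage` cost that reflects every signature verified
// up to that point (cf. `cost::PER_SIGNATURE_CHECKED`).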
+ let signatures_checked = check_signatures::( + msg.prevotes.iter().map(|vote| { + (finality_grandpa::Message::Prevote(vote.prevote.clone()), &vote.id, &vote.signature) + }), + msg.round_number, + set_id.0, + 0, + &mut buf, + telemetry.clone(), + )?; + + // check signatures on all contained precommits. + let _ = check_signatures::( + msg.precommits.iter().map(|vote| { + ( + finality_grandpa::Message::Precommit(vote.precommit.clone()), + &vote.id, + &vote.signature, + ) + }), + msg.round_number, + set_id.0, + signatures_checked, + &mut buf, + telemetry, + )?; + + Ok(()) +} + +/// An output sink for commit messages. +struct CommitsOut { + network: Arc>>, + set_id: SetId, + is_voter: bool, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, +} + +impl CommitsOut { + /// Create a new commit output stream. + pub(crate) fn new( + network: Arc>>, + set_id: SetIdNumber, + is_voter: bool, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, + telemetry: Option, + ) -> Self { + CommitsOut { + network, + set_id: SetId(set_id), + is_voter, + gossip_validator, + neighbor_sender, + telemetry, + } + } +} + +impl Sink<(RoundNumber, Commit)> for CommitsOut { + type Error = Error; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send( + self: Pin<&mut Self>, + input: (RoundNumber, Commit), + ) -> Result<(), Self::Error> { + if !self.is_voter { + return Ok(()) + } + + let (round, commit) = input; + let round = Round(round); + + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.commit_issued"; + "target_number" => ?commit.target_number, + "target_hash" => ?commit.target_hash, + ); + let (precommits, auth_data) = commit + .precommits + .into_iter() + .map(|signed| (signed.precommit, (signed.signature, signed.id))) + .unzip(); + + let compact_commit = CompactCommit:: { + target_hash: commit.target_hash, + target_number: commit.target_number, + precommits, + auth_data, + }; + + let message = GossipMessage::Commit(FullCommitMessage:: { + round, + set_id: self.set_id, + message: compact_commit, + }); + + let topic = global_topic::(self.set_id.0); + + // the gossip validator needs to be made aware of the best commit-height we know of + // before gossiping + self.gossip_validator.note_commit_finalized( + round, + self.set_id, + commit.target_number, + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + self.network.lock().gossip_message(topic, message.encode(), false); + + Ok(()) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } +} diff --git a/substrate/client/consensus/grandpa/src/communication/periodic.rs b/substrate/client/consensus/grandpa/src/communication/periodic.rs new file mode 100644 index 00000000..f3f75728 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/communication/periodic.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Periodic rebroadcast of neighbor packets. + +use futures::{future::FutureExt as _, prelude::*, ready, stream::Stream}; +use futures_timer::Delay; +use log::debug; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; + +use sc_network::PeerId; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use super::gossip::{GossipMessage, NeighborPacket}; +use crate::LOG_TARGET; + +/// A sender used to send neighbor packets to a background job. +#[derive(Clone)] +pub(super) struct NeighborPacketSender( + TracingUnboundedSender<(Vec, NeighborPacket>)>, +); + +impl NeighborPacketSender { + /// Send a neighbor packet for the background worker to gossip to peers. + pub fn send( + &self, + who: Vec, + neighbor_packet: NeighborPacket>, + ) { + if let Err(err) = self.0.unbounded_send((who, neighbor_packet)) { + debug!(target: LOG_TARGET, "Failed to send neighbor packet: {:?}", err); + } + } +} + +/// NeighborPacketWorker is listening on a channel for new neighbor packets being produced by +/// components within `finality-grandpa` and forwards those packets to the underlying +/// `NetworkEngine` through the `NetworkBridge` that it is being polled by (see `Stream` +/// implementation). Periodically it sends out the last packet in cases where no new ones arrive. +pub(super) struct NeighborPacketWorker { + last: Option<(Vec, NeighborPacket>)>, + rebroadcast_period: Duration, + delay: Delay, + rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, +} + +impl Unpin for NeighborPacketWorker {} + +impl NeighborPacketWorker { + pub(super) fn new(rebroadcast_period: Duration) -> (Self, NeighborPacketSender) { + let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( + "mpsc_grandpa_neighbor_packet_worker", + 100_000, + ); + let delay = Delay::new(rebroadcast_period); + + ( + NeighborPacketWorker { last: None, rebroadcast_period, delay, rx }, + NeighborPacketSender(tx), + ) + } +} + +impl Stream for NeighborPacketWorker { + type Item = (Vec, GossipMessage); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = &mut *self; + match this.rx.poll_next_unpin(cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some((to, packet))) => { + this.delay.reset(this.rebroadcast_period); + this.last = Some((to.clone(), packet.clone())); + + return Poll::Ready(Some((to, GossipMessage::::from(packet)))) + }, + // Don't return yet, maybe the timer fired. + Poll::Pending => {}, + }; + + ready!(this.delay.poll_unpin(cx)); + + // Getting this far here implies that the timer fired. + + this.delay.reset(this.rebroadcast_period); + + // Make sure the underlying task is scheduled for wake-up. + // + // Note: In case poll_unpin is called after the resetted delay fires again, this + // will drop one tick. Deemed as very unlikely and also not critical. 
+ while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} + + if let Some((ref to, ref packet)) = this.last { + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) + } + + Poll::Pending + } +} diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs new file mode 100644 index 00000000..53f09c81 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -0,0 +1,685 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests for the communication portion of the GRANDPA crate. + +use super::{ + gossip::{self, GossipValidator}, + Round, SetId, VoterSet, +}; +use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; +use futures::prelude::*; +use parity_scale_codec::Encode; +use sc_network::{ + config::{MultiaddrWithPeerId, Role}, + event::Event as NetworkEvent, + types::ProtocolName, + Multiaddr, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender, + PeerId, ReputationChange, +}; +use sc_network_common::{ + role::ObservedRole, + sync::{SyncEvent as SyncStreamEvent, SyncEventStream}, +}; +use sc_network_gossip::Validator; +use sc_network_test::{Block, Hash}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_consensus_grandpa::AuthorityList; +use sp_keyring::Ed25519Keyring; +use sp_runtime::traits::NumberFor; +use std::{ + collections::HashSet, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +#[derive(Debug)] +pub(crate) enum Event { + EventStream(TracingUnboundedSender), + WriteNotification(PeerId, Vec), + Report(PeerId, ReputationChange), + Announce(Hash), +} + +#[derive(Clone)] +pub(crate) struct TestNetwork { + sender: TracingUnboundedSender, +} + +impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) {} + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: ProtocolName, + _peers: HashSet, + ) -> 
Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: ProtocolName, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} + + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } +} + +impl NetworkEventStream for TestNetwork { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test", 100_000); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + Box::pin(rx) + } +} + +impl NetworkNotification for TestNetwork { + fn write_notification(&self, target: PeerId, _protocol: ProtocolName, message: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); + } + + fn notification_sender( + &self, + _target: PeerId, + _protocol: ProtocolName, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { + unimplemented!(); + } +} + +impl NetworkBlock> for TestNetwork { + fn announce_block(&self, hash: Hash, _data: Option>) { + let _ = self.sender.unbounded_send(Event::Announce(hash)); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); + } +} + +impl NetworkSyncForkRequest> for TestNetwork { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + +impl sc_network_gossip::ValidatorContext for TestNetwork { + fn broadcast_topic(&mut self, _: Hash, _: bool) {} + + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} + + fn send_message(&mut self, who: &PeerId, data: Vec) { + ::write_notification( + self, + *who, + grandpa_protocol_name::NAME.into(), + data, + ); + } + + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} +} + +#[derive(Clone)] +pub(crate) struct TestSync; + +impl SyncEventStream for TestSync { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + Box::pin(futures::stream::pending()) + } +} + +impl NetworkBlock> for TestSync { + fn announce_block(&self, _hash: Hash, _data: Option>) { + unimplemented!(); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); + } +} + +impl NetworkSyncForkRequest> for TestSync { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + +pub(crate) struct Tester { + pub(crate) net_handle: super::NetworkBridge, + gossip_validator: Arc>, + pub(crate) events: TracingUnboundedReceiver, +} + +impl Tester { + fn filter_network_events(self, mut pred: F) -> impl Future + where + F: FnMut(Event) -> bool, + { + let mut s = Some(self); + futures::future::poll_fn(move |cx| loop { + match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { + Poll::Ready(None) => panic!("concluded early"), + Poll::Ready(Some(item)) => + if pred(item) { + return Poll::Ready(s.take().unwrap()) + }, + Poll::Pending => return Poll::Pending, + } + }) + } + + pub(crate) fn trigger_gossip_validator_reputation_change(&self, p: &PeerId) { + self.gossip_validator.validate( + &mut crate::communication::tests::NoopContext, + p, + &vec![1, 2, 3], + ); + } +} + +// some random config (not really needed) +fn config() -> crate::Config { + crate::Config { + gossip_duration: std::time::Duration::from_millis(10), + justification_generation_period: 256, + keystore: None, + name: None, + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + 
protocol_name: grandpa_protocol_name::NAME.into(), + } +} + +// dummy voter set state +fn voter_set_state() -> SharedVoterSetState { + use crate::{authorities::AuthoritySet, environment::VoterSetState}; + use finality_grandpa::round::State as RoundState; + use sp_consensus_grandpa::AuthorityId; + use sp_core::{crypto::ByteArray, H256}; + + let state = RoundState::genesis((H256::zero(), 0)); + let base = state.prevote_ghost.unwrap(); + + let voters = vec![(AuthorityId::from_slice(&[1; 32]).unwrap(), 1)]; + let voters = AuthoritySet::genesis(voters).unwrap(); + + let set_state = VoterSetState::live(0, &voters, base); + + set_state.into() +} + +// needs to run in a tokio runtime. +pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { + let (tx, rx) = tracing_unbounded("test", 100_000); + let net = TestNetwork { sender: tx }; + let sync = TestSync {}; + + #[derive(Clone)] + struct Exit; + + impl futures::Future for Exit { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll<()> { + Poll::Pending + } + } + + let bridge = + super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None); + + ( + futures::future::ready(Tester { + gossip_validator: bridge.validator.clone(), + net_handle: bridge, + events: rx, + }), + net, + ) +} + +fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() +} + +struct NoopContext; + +impl sc_network_gossip::ValidatorContext for NoopContext { + fn broadcast_topic(&mut self, _: Hash, _: bool) {} + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} + fn send_message(&mut self, _: &PeerId, _: Vec) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} +} + +#[test] +fn good_commit_leads_to_relay() { + let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let public = make_ids(&private[..]); + let voter_set = Arc::new(VoterSet::new(public.iter().cloned()).unwrap()); + + let round = 1; + let set_id = 1; + + let commit = { + let target_hash: Hash = [1; 32].into(); + let target_number = 500; + + let precommit = finality_grandpa::Precommit { target_hash, target_number }; + let payload = sp_consensus_grandpa::localized_payload( + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), + ); + + let mut precommits = Vec::new(); + let mut auth_data = Vec::new(); + + for (i, key) in private.iter().enumerate() { + precommits.push(precommit.clone()); + + let signature = sp_consensus_grandpa::AuthoritySignature::from(key.sign(&payload[..])); + auth_data.push((signature, public[i].0.clone())) + } + + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } + }; + + let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + message: commit, + }) + .encode(); + + let id = PeerId::random(); + let global_topic = super::global_topic::(set_id); + + let test = make_test_network() + .0 + .then(move |tester| { + // register a peer. + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); + future::ready((tester, id)) + }) + .then(move |(tester, id)| { + // start round, dispatch commit, and wait for broadcast. + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); + + { + let (action, ..) 
= tester.gossip_validator.do_validate(&id, &encoded_commit[..]); + match action { + gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), + _ => panic!("wrong expected outcome from initial commit validation"), + } + } + + let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); + + // asking for global communication will cause the test network + // to send us an event asking us for a stream. use it to + // send a message. + let sender_id = id; + let send_message = tester.filter_network_events(move |event| match event { + Event::EventStream(sender) => { + // Add the sending peer and send the commit + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: sender_id, + protocol: grandpa_protocol_name::NAME.into(), + negotiated_fallback: None, + role: ObservedRole::Full, + received_handshake: vec![], + }); + + let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: sender_id, + messages: vec![( + grandpa_protocol_name::NAME.into(), + commit_to_send.clone().into(), + )], + }); + + // Add a random peer which will be the recipient of this message + let receiver_id = PeerId::random(); + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: receiver_id, + protocol: grandpa_protocol_name::NAME.into(), + negotiated_fallback: None, + role: ObservedRole::Full, + received_handshake: vec![], + }); + + // Announce its local set has being on the current set id through a neighbor + // packet, otherwise it won't be eligible to receive the commit + let _ = { + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, + }); + + let msg = gossip::GossipMessage::::Neighbor(update); + + sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: receiver_id, + messages: vec![( + grandpa_protocol_name::NAME.into(), + msg.encode().into(), + )], + }) + }; + + true + }, + _ => false, + }); + + // when the commit comes in, we'll tell the callback it was good. + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); + }, + _ => panic!("commit expected"), + }); + + // once the message is sent and commit is "handled" we should have + // a repropagation event coming from the network. + let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::WriteNotification(_, data) => data == encoded_commit, + _ => false, + }) + }) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. 
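// NOTE: clarification added here, not in the original sources.
// `future::select` below resolves as soon as either side finishes; the
// `NetworkBridge` future only resolves with an error (see its `Future` impl),
// so in the normal case the test ends once `fut` has observed the re-gossiped
// commit.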
+ future::select(fut, network_bridge) + }); + + futures::executor::block_on(test); +} + +#[test] +fn bad_commit_leads_to_report() { + sp_tracing::try_init_simple(); + let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let public = make_ids(&private[..]); + let voter_set = Arc::new(VoterSet::new(public.iter().cloned()).unwrap()); + + let round = 1; + let set_id = 1; + + let commit = { + let target_hash: Hash = [1; 32].into(); + let target_number = 500; + + let precommit = finality_grandpa::Precommit { target_hash, target_number }; + let payload = sp_consensus_grandpa::localized_payload( + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), + ); + + let mut precommits = Vec::new(); + let mut auth_data = Vec::new(); + + for (i, key) in private.iter().enumerate() { + precommits.push(precommit.clone()); + + let signature = sp_consensus_grandpa::AuthoritySignature::from(key.sign(&payload[..])); + auth_data.push((signature, public[i].0.clone())) + } + + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } + }; + + let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + message: commit, + }) + .encode(); + + let id = PeerId::random(); + let global_topic = super::global_topic::(set_id); + + let test = make_test_network() + .0 + .map(move |tester| { + // register a peer. + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); + (tester, id) + }) + .then(move |(tester, id)| { + // start round, dispatch commit, and wait for broadcast. + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); + + { + let (action, ..) = tester.gossip_validator.do_validate(&id, &encoded_commit[..]); + match action { + gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), + _ => panic!("wrong expected outcome from initial commit validation"), + } + } + + let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); + + // asking for global communication will cause the test network + // to send us an event asking us for a stream. use it to + // send a message. + let sender_id = id; + let send_message = tester.filter_network_events(move |event| match event { + Event::EventStream(sender) => { + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: sender_id, + protocol: grandpa_protocol_name::NAME.into(), + negotiated_fallback: None, + role: ObservedRole::Full, + received_handshake: vec![], + }); + let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: sender_id, + messages: vec![( + grandpa_protocol_name::NAME.into(), + commit_to_send.clone().into(), + )], + }); + + true + }, + _ => false, + }); + + // when the commit comes in, we'll tell the callback it was bad. + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); + }, + _ => panic!("commit expected"), + }); + + // once the message is sent and commit is "handled" we should have + // a report event coming from the network. 
+ let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::Report(who, cost_benefit) => + who == id && cost_benefit == super::cost::INVALID_COMMIT, + _ => false, + }) + }) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. + future::select(fut, network_bridge) + }); + + futures::executor::block_on(test); +} + +#[test] +fn peer_with_higher_view_leads_to_catch_up_request() { + let id = PeerId::random(); + + let (tester, mut net) = make_test_network(); + let test = tester + .map(move |tester| { + // register a peer with authority role. + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Authority); + (tester, id) + }) + .then(move |(tester, id)| { + // send neighbor message at round 10 and height 50 + let result = tester.gossip_validator.validate( + &mut net, + &id, + &gossip::GossipMessage::::from(gossip::NeighborPacket { + set_id: SetId(0), + round: Round(10), + commit_finalized_height: 50, + }) + .encode(), + ); + + // neighbor packets are always discard + match result { + sc_network_gossip::ValidationResult::Discard => {}, + _ => panic!("wrong expected outcome from neighbor validation"), + } + + // a catch up request should be sent to the peer for round - 1 + tester + .filter_network_events(move |event| match event { + Event::WriteNotification(peer, message) => { + assert_eq!(peer, id); + + assert_eq!( + message, + gossip::GossipMessage::::CatchUpRequest( + gossip::CatchUpRequestMessage { set_id: SetId(0), round: Round(9) } + ) + .encode(), + ); + + true + }, + _ => false, + }) + .map(|_| ()) + }); + + futures::executor::block_on(test); +} + +fn local_chain_spec() -> Box { + use sc_chain_spec::{ChainSpec, GenericChainSpec}; + use serde::{Deserialize, Serialize}; + use sp_runtime::{BuildStorage, Storage}; + + #[derive(Debug, Serialize, Deserialize)] + struct Genesis(std::collections::BTreeMap); + impl BuildStorage for Genesis { + fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { + storage.top.extend( + self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), + ); + Ok(()) + } + } + let chain_spec = GenericChainSpec::::from_json_bytes( + &include_bytes!("../../../../chain-spec/res/chain_spec.json")[..], + ) + .unwrap(); + chain_spec.cloned_box() +} + +#[test] +fn grandpa_protocol_name() { + let chain_spec = local_chain_spec(); + + // Create protocol name using random genesis hash. + let genesis_hash = sp_core::H256::random(); + let expected = format!("/{}/grandpa/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); + let proto_name = grandpa_protocol_name::standard_name(&genesis_hash, &chain_spec); + assert_eq!(proto_name.to_string(), expected); + + // Create protocol name using hardcoded genesis hash. Verify exact representation. 
+ let genesis_hash = [ + 53, 79, 112, 97, 119, 217, 39, 202, 147, 138, 225, 38, 88, 182, 215, 185, 110, 88, 8, 53, + 125, 210, 158, 151, 50, 113, 102, 59, 245, 199, 221, 240, + ]; + let expected = + "/354f706177d927ca938ae12658b6d7b96e5808357dd29e973271663bf5c7ddf0/grandpa/1".to_string(); + let proto_name = grandpa_protocol_name::standard_name(&genesis_hash, &chain_spec); + assert_eq!(proto_name.to_string(), expected); +} diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs new file mode 100644 index 00000000..112bd514 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -0,0 +1,1536 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{ + collections::{BTreeMap, HashMap}, + iter::FromIterator, + marker::PhantomData, + pin::Pin, + sync::Arc, + time::Duration, +}; + +use finality_grandpa::{ + round::State as RoundState, voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError, +}; +use futures::prelude::*; +use futures_timer::Delay; +use log::{debug, warn}; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::RwLock; +use prometheus_endpoint::{register, Counter, Gauge, PrometheusError, U64}; + +use sc_client_api::{ + backend::{apply_aux, Backend as BackendT}, + utils::is_descendent_of, +}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain as SelectChainT; +use sp_consensus_grandpa::{ + AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, + SetId, GRANDPA_ENGINE_ID, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; + +use crate::{ + authorities::{AuthoritySet, SharedAuthoritySet}, + communication::{Network as NetworkT, Syncing as SyncingT}, + justification::GrandpaJustification, + local_authority_id, + notification::GrandpaJustificationSender, + until_imported::UntilVoteTargetImported, + voting_rule::VotingRule as VotingRuleT, + ClientForGrandpa, CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, + PrimaryPropose, SignedMessage, VoterCommand, LOG_TARGET, +}; + +type HistoricalVotes = finality_grandpa::HistoricalVotes< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; + +/// Data about a completed round. The set of votes that is stored must be +/// minimal, i.e. at most one equivocation is stored per voter. +#[derive(Debug, Clone, Decode, Encode, PartialEq)] +pub struct CompletedRound { + /// The round number. + pub number: RoundNumber, + /// The round state (prevote ghost, estimate, finalized, etc.) + pub state: RoundState>, + /// The target block base used for voting in the round. 
+ pub base: (Block::Hash, NumberFor), + /// All the votes observed in the round. + pub votes: Vec>, +} + +// Data about last completed rounds within a single voter set. Stores +// NUM_LAST_COMPLETED_ROUNDS and always contains data about at least one round +// (genesis). +#[derive(Debug, Clone, PartialEq)] +pub struct CompletedRounds { + rounds: Vec>, + set_id: SetId, + voters: Vec, +} + +// NOTE: the current strategy for persisting completed rounds is very naive +// (update everything) and we also rely on cloning to do atomic updates, +// therefore this value should be kept small for now. +const NUM_LAST_COMPLETED_ROUNDS: usize = 2; + +impl Encode for CompletedRounds { + fn encode(&self) -> Vec { + let v = Vec::from_iter(&self.rounds); + (&v, &self.set_id, &self.voters).encode() + } +} + +impl parity_scale_codec::EncodeLike for CompletedRounds {} + +impl Decode for CompletedRounds { + fn decode( + value: &mut I, + ) -> Result { + <(Vec>, SetId, Vec)>::decode(value) + .map(|(rounds, set_id, voters)| CompletedRounds { rounds, set_id, voters }) + } +} + +impl CompletedRounds { + /// Create a new completed rounds tracker with NUM_LAST_COMPLETED_ROUNDS capacity. + pub(crate) fn new( + genesis: CompletedRound, + set_id: SetId, + voters: &AuthoritySet>, + ) -> CompletedRounds { + let mut rounds = Vec::with_capacity(NUM_LAST_COMPLETED_ROUNDS); + rounds.push(genesis); + + let voters = voters.current_authorities.iter().map(|(a, _)| a.clone()).collect(); + CompletedRounds { rounds, set_id, voters } + } + + /// Get the set-id and voter set of the completed rounds. + pub fn set_info(&self) -> (SetId, &[AuthorityId]) { + (self.set_id, &self.voters[..]) + } + + /// Iterate over all completed rounds. + pub fn iter(&self) -> impl Iterator> { + self.rounds.iter().rev() + } + + /// Returns the last (latest) completed round. + pub fn last(&self) -> &CompletedRound { + self.rounds + .first() + .expect("inner is never empty; always contains at least genesis; qed") + } + + /// Push a new completed round, oldest round is evicted if number of rounds + /// is higher than `NUM_LAST_COMPLETED_ROUNDS`. + pub fn push(&mut self, completed_round: CompletedRound) { + use std::cmp::Reverse; + + match self + .rounds + .binary_search_by_key(&Reverse(completed_round.number), |completed_round| { + Reverse(completed_round.number) + }) { + Ok(idx) => self.rounds[idx] = completed_round, + Err(idx) => self.rounds.insert(idx, completed_round), + }; + + if self.rounds.len() > NUM_LAST_COMPLETED_ROUNDS { + self.rounds.pop(); + } + } +} + +/// A map with voter status information for currently live rounds, +/// which votes have we cast and what are they. +pub type CurrentRounds = BTreeMap::Header>>; + +/// The state of the current voter set, whether it is currently active or not +/// and information related to the previously completed rounds. Current round +/// voting status is used when restarting the voter, i.e. it will re-use the +/// previous votes for a given round if appropriate (same round and same local +/// key). +#[derive(Debug, Decode, Encode, PartialEq)] +pub enum VoterSetState { + /// The voter is live, i.e. participating in rounds. + Live { + /// The previously completed rounds. + completed_rounds: CompletedRounds, + /// Voter status for the currently live rounds. + current_rounds: CurrentRounds, + }, + /// The voter is paused, i.e. not casting or importing any votes. + Paused { + /// The previously completed rounds. 
+ completed_rounds: CompletedRounds, + }, +} + +impl VoterSetState { + /// Create a new live VoterSetState with round 0 as a completed round using + /// the given genesis state and the given authorities. Round 1 is added as a + /// current round (with state `HasVoted::No`). + pub(crate) fn live( + set_id: SetId, + authority_set: &AuthoritySet>, + genesis_state: (Block::Hash, NumberFor), + ) -> VoterSetState { + let state = RoundState::genesis((genesis_state.0, genesis_state.1)); + let completed_rounds = CompletedRounds::new( + CompletedRound { + number: 0, + state, + base: (genesis_state.0, genesis_state.1), + votes: Vec::new(), + }, + set_id, + authority_set, + ); + + let mut current_rounds = CurrentRounds::::new(); + current_rounds.insert(1, HasVoted::No); + + VoterSetState::Live { completed_rounds, current_rounds } + } + + /// Returns the last completed rounds. + pub(crate) fn completed_rounds(&self) -> CompletedRounds { + match self { + VoterSetState::Live { completed_rounds, .. } => completed_rounds.clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.clone(), + } + } + + /// Returns the last completed round. + pub(crate) fn last_completed_round(&self) -> CompletedRound { + match self { + VoterSetState::Live { completed_rounds, .. } => completed_rounds.last().clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.last().clone(), + } + } + + /// Returns the voter set state validating that it includes the given round + /// in current rounds and that the voter isn't paused. + pub fn with_current_round( + &self, + round: RoundNumber, + ) -> Result<(&CompletedRounds, &CurrentRounds), Error> { + if let VoterSetState::Live { completed_rounds, current_rounds } = self { + if current_rounds.contains_key(&round) { + Ok((completed_rounds, current_rounds)) + } else { + let msg = "Voter acting on a live round we are not tracking."; + Err(Error::Safety(msg.to_string())) + } + } else { + let msg = "Voter acting while in paused state."; + Err(Error::Safety(msg.to_string())) + } + } +} + +/// Whether we've voted already during a prior run of the program. +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +pub enum HasVoted { + /// Has not voted already in this round. + No, + /// Has voted in this round. + Yes(AuthorityId, Vote
<Header>),
+}
+
+/// The votes cast by this voter already during a prior run of the program.
+#[derive(Debug, Clone, Decode, Encode, PartialEq)]
+pub enum Vote<Header: HeaderT> {
+    /// Has cast a proposal.
+    Propose(PrimaryPropose<Header>),
+    /// Has cast a prevote.
+    Prevote(Option<PrimaryPropose<Header>>, Prevote<Header>),
+    /// Has cast a precommit (implies prevote.)
+    Precommit(Option<PrimaryPropose<Header>>, Prevote<Header>, Precommit<Header>),
+}
+
+impl<Header: HeaderT> HasVoted<Header> {
+    /// Returns the proposal we should vote with (if any.)
+    pub fn propose(&self) -> Option<&PrimaryPropose<Header>> {
+        match self {
+            HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose),
+            HasVoted::Yes(_, Vote::Prevote(propose, _)) |
+            HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(),
+            _ => None,
+        }
+    }
+
+    /// Returns the prevote we should vote with (if any.)
+    pub fn prevote(&self) -> Option<&Prevote<Header>> {
+        match self {
+            HasVoted::Yes(_, Vote::Prevote(_, prevote)) |
+            HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote),
+            _ => None,
+        }
+    }
+
+    /// Returns the precommit we should vote with (if any.)
+    pub fn precommit(&self) -> Option<&Precommit<Header>
> { + match self { + HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), + _ => None, + } + } + + /// Returns true if the voter can still propose, false otherwise. + pub fn can_propose(&self) -> bool { + self.propose().is_none() + } + + /// Returns true if the voter can still prevote, false otherwise. + pub fn can_prevote(&self) -> bool { + self.prevote().is_none() + } + + /// Returns true if the voter can still precommit, false otherwise. + pub fn can_precommit(&self) -> bool { + self.precommit().is_none() + } +} + +/// A voter set state meant to be shared safely across multiple owners. +#[derive(Clone)] +pub struct SharedVoterSetState { + /// The inner shared `VoterSetState`. + inner: Arc>>, + /// A tracker for the rounds that we are actively participating on (i.e. voting) + /// and the authority id under which we are doing it. + voting: Arc>>, +} + +impl From> for SharedVoterSetState { + fn from(set_state: VoterSetState) -> Self { + SharedVoterSetState::new(set_state) + } +} + +impl SharedVoterSetState { + /// Create a new shared voter set tracker with the given state. + pub(crate) fn new(state: VoterSetState) -> Self { + SharedVoterSetState { + inner: Arc::new(RwLock::new(state)), + voting: Arc::new(RwLock::new(HashMap::new())), + } + } + + /// Read the inner voter set state. + pub(crate) fn read(&self) -> parking_lot::RwLockReadGuard> { + self.inner.read() + } + + /// Get the authority id that we are using to vote on the given round, if any. + pub(crate) fn voting_on(&self, round: RoundNumber) -> Option { + self.voting.read().get(&round).cloned() + } + + /// Note that we started voting on the give round with the given authority id. + pub(crate) fn started_voting_on(&self, round: RoundNumber, local_id: AuthorityId) { + self.voting.write().insert(round, local_id); + } + + /// Note that we have finished voting on the given round. If we were voting on + /// the given round, the authority id that we were using to do it will be + /// cleared. + pub(crate) fn finished_voting_on(&self, round: RoundNumber) { + self.voting.write().remove(&round); + } + + /// Return vote status information for the current round. + pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { + match &*self.inner.read() { + VoterSetState::Live { current_rounds, .. } => current_rounds + .get(&round) + .and_then(|has_voted| match has_voted { + HasVoted::Yes(id, vote) => Some(HasVoted::Yes(id.clone(), vote.clone())), + _ => None, + }) + .unwrap_or(HasVoted::No), + _ => HasVoted::No, + } + } + + // NOTE: not exposed outside of this module intentionally. + fn with(&self, f: F) -> R + where + F: FnOnce(&mut VoterSetState) -> R, + { + f(&mut *self.inner.write()) + } +} + +/// Prometheus metrics for GRANDPA. 
+#[derive(Clone)] +pub(crate) struct Metrics { + finality_grandpa_round: Gauge, + finality_grandpa_prevotes: Counter, + finality_grandpa_precommits: Counter, +} + +impl Metrics { + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { + Ok(Self { + finality_grandpa_round: register( + Gauge::new("substrate_finality_grandpa_round", "Highest completed GRANDPA round.")?, + registry, + )?, + finality_grandpa_prevotes: register( + Counter::new( + "substrate_finality_grandpa_prevotes_total", + "Total number of GRANDPA prevotes cast locally.", + )?, + registry, + )?, + finality_grandpa_precommits: register( + Counter::new( + "substrate_finality_grandpa_precommits_total", + "Total number of GRANDPA precommits cast locally.", + )?, + registry, + )?, + }) + } +} + +/// The environment we run GRANDPA in. +pub(crate) struct Environment< + Backend, + Block: BlockT, + C, + N: NetworkT, + S: SyncingT, + SC, + VR, +> { + pub(crate) client: Arc, + pub(crate) select_chain: SC, + pub(crate) voters: Arc>, + pub(crate) config: Config, + pub(crate) authority_set: SharedAuthoritySet>, + pub(crate) network: crate::communication::NetworkBridge, + pub(crate) set_id: SetId, + pub(crate) voter_set_state: SharedVoterSetState, + pub(crate) voting_rule: VR, + pub(crate) metrics: Option, + pub(crate) justification_sender: Option>, + pub(crate) telemetry: Option, + pub(crate) _phantom: PhantomData, +} + +impl, S: SyncingT, SC, VR> + Environment +{ + /// Updates the voter set state using the given closure. The write lock is + /// held during evaluation of the closure and the environment's voter set + /// state is set to its result if successful. + pub(crate) fn update_voter_set_state(&self, f: F) -> Result<(), Error> + where + F: FnOnce(&VoterSetState) -> Result>, Error>, + { + self.voter_set_state.with(|voter_set_state| { + if let Some(set_state) = f(voter_set_state)? { + *voter_set_state = set_state; + + if let Some(metrics) = self.metrics.as_ref() { + if let VoterSetState::Live { completed_rounds, .. } = voter_set_state { + let highest = completed_rounds + .rounds + .iter() + .map(|round| round.number) + .max() + .expect("There is always one completed round (genesis); qed"); + + metrics.finality_grandpa_round.set(highest); + } + } + } + Ok(()) + }) + } +} + +impl Environment +where + Block: BlockT, + BE: BackendT, + C: ClientForGrandpa, + C::Api: GrandpaApi, + N: NetworkT, + S: SyncingT, + SC: SelectChainT, +{ + /// Report the given equivocation to the GRANDPA runtime module. This method + /// generates a session membership proof of the offender and then submits an + /// extrinsic to report the equivocation. In particular, the session membership + /// proof must be generated at the block at which the given set was active which + /// isn't necessarily the best block if there are pending authority set changes. 
+ pub(crate) fn report_equivocation( + &self, + equivocation: Equivocation>, + ) -> Result<(), Error> { + if let Some(local_id) = self.voter_set_state.voting_on(equivocation.round_number()) { + if *equivocation.offender() == local_id { + return Err(Error::Safety( + "Refraining from sending equivocation report for our own equivocation.".into(), + )) + } + } + + let is_descendent_of = is_descendent_of(&*self.client, None); + + let (best_block_hash, best_block_number) = { + // TODO [#9158]: Use SelectChain::best_chain() to get a potentially + // more accurate best block + let info = self.client.info(); + (info.best_hash, info.best_number) + }; + + let authority_set = self.authority_set.inner(); + + // block hash and number of the next pending authority set change in the + // given best chain. + let next_change = authority_set + .next_change(&best_block_hash, &is_descendent_of) + .map_err(|e| Error::Safety(e.to_string()))?; + + // find the hash of the latest block in the current set + let current_set_latest_hash = match next_change { + Some((_, n)) if n.is_zero() => + return Err(Error::Safety("Authority set change signalled at genesis.".to_string())), + // the next set starts at `n` so the current one lasts until `n - 1`. if + // `n` is later than the best block, then the current set is still live + // at best block. + Some((_, n)) if n > best_block_number => best_block_hash, + Some((h, _)) => { + // this is the header at which the new set will start + let header = self.client.header(h)?.expect( + "got block hash from registered pending change; \ + pending changes are only registered on block import; qed.", + ); + + // its parent block is the last block in the current set + *header.parent_hash() + }, + // there is no pending change, the latest block for the current set is + // the best block. + None => best_block_hash, + }; + + // generate key ownership proof at that block + let key_owner_proof = match self + .client + .runtime_api() + .generate_key_ownership_proof( + current_set_latest_hash, + authority_set.set_id, + equivocation.offender().clone(), + ) + .map_err(Error::RuntimeApi)? + { + Some(proof) => proof, + None => { + debug!( + target: LOG_TARGET, + "Equivocation offender is not part of the authority set." 
+ ); + return Ok(()) + }, + }; + + // submit equivocation report at **best** block + let equivocation_proof = EquivocationProof::new(authority_set.set_id, equivocation); + + self.client + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + best_block_hash, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + + Ok(()) + } +} + +impl finality_grandpa::Chain> + for Environment +where + Block: BlockT, + BE: BackendT, + C: ClientForGrandpa, + N: NetworkT, + S: SyncingT, + SC: SelectChainT, + VR: VotingRuleT, + NumberFor: BlockNumberOps, +{ + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + ancestry(&self.client, base, block) + } +} + +pub(crate) fn ancestry( + client: &Arc, + base: Block::Hash, + block: Block::Hash, +) -> Result, GrandpaError> +where + Client: HeaderMetadata, +{ + if base == block { + return Err(GrandpaError::NotDescendent) + } + + let tree_route_res = sp_blockchain::tree_route(&**client, block, base); + + let tree_route = match tree_route_res { + Ok(tree_route) => tree_route, + Err(e) => { + debug!( + target: LOG_TARGET, + "Encountered error computing ancestry between block {:?} and base {:?}: {}", + block, + base, + e + ); + + return Err(GrandpaError::NotDescendent) + }, + }; + + if tree_route.common_block().hash != base { + return Err(GrandpaError::NotDescendent) + } + + // skip one because our ancestry is meant to start from the parent of `block`, + // and `tree_route` includes it. + Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) +} + +impl voter::Environment> + for Environment +where + Block: BlockT, + B: BackendT, + C: ClientForGrandpa + 'static, + C::Api: GrandpaApi, + N: NetworkT, + S: SyncingT, + SC: SelectChainT + 'static, + VR: VotingRuleT + Clone + 'static, + NumberFor: BlockNumberOps, +{ + type Timer = Pin> + Send>>; + type BestChain = Pin< + Box< + dyn Future)>, Self::Error>> + + Send, + >, + >; + + type Id = AuthorityId; + type Signature = AuthoritySignature; + + // regular round message streams + type In = Pin< + Box< + dyn Stream< + Item = Result< + ::finality_grandpa::SignedMessage< + Block::Hash, + NumberFor, + Self::Signature, + Self::Id, + >, + Self::Error, + >, + > + Send, + >, + >; + type Out = Pin< + Box< + dyn Sink< + ::finality_grandpa::Message>, + Error = Self::Error, + > + Send, + >, + >; + + type Error = CommandOrError>; + + fn best_chain_containing(&self, block: Block::Hash) -> Self::BestChain { + let client = self.client.clone(); + let authority_set = self.authority_set.clone(); + let select_chain = self.select_chain.clone(); + let voting_rule = self.voting_rule.clone(); + let set_id = self.set_id; + + Box::pin(async move { + // NOTE: when we finalize an authority set change through the sync protocol the voter is + // signaled asynchronously. therefore the voter could still vote in the next round + // before activating the new set. the `authority_set` is updated immediately thus + // we restrict the voter based on that. 
+ if set_id != authority_set.set_id() { + return Ok(None) + } + + best_chain_containing(block, client, authority_set, select_chain, voting_rule) + .await + .map_err(|e| e.into()) + }) + } + + fn round_data( + &self, + round: RoundNumber, + ) -> voter::RoundData { + let prevote_timer = Delay::new(self.config.gossip_duration * 2); + let precommit_timer = Delay::new(self.config.gossip_duration * 4); + + let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); + + let has_voted = match self.voter_set_state.has_voted(round) { + HasVoted::Yes(id, vote) => + if local_id.as_ref().map(|k| k == &id).unwrap_or(false) { + HasVoted::Yes(id, vote) + } else { + HasVoted::No + }, + HasVoted::No => HasVoted::No, + }; + + // NOTE: we cache the local authority id that we'll be using to vote on the + // given round. this is done to make sure we only check for available keys + // from the keystore in this method when beginning the round, otherwise if + // the keystore state changed during the round (e.g. a key was removed) it + // could lead to internal state inconsistencies in the voter environment + // (e.g. we wouldn't update the voter set state after prevoting since there's + // no local authority id). + if let Some(id) = local_id.as_ref() { + self.voter_set_state.started_voting_on(round, id.clone()); + } + + // we can only sign when we have a local key in the authority set + // and we have a reference to the keystore. + let keystore = match (local_id.as_ref(), self.config.keystore.as_ref()) { + (Some(id), Some(keystore)) => Some((id.clone(), keystore.clone()).into()), + _ => None, + }; + + let (incoming, outgoing) = self.network.round_communication( + keystore, + crate::communication::Round(round), + crate::communication::SetId(self.set_id), + self.voters.clone(), + has_voted, + ); + + // schedule incoming messages from the network to be held until + // corresponding blocks are imported. + let incoming = Box::pin( + UntilVoteTargetImported::new( + self.client.import_notification_stream(), + self.network.clone(), + self.client.clone(), + incoming, + "round", + None, + ) + .map_err(Into::into), + ); + + // schedule network message cleanup when sink drops. 
+ let outgoing = Box::pin(outgoing.sink_err_into()); + + voter::RoundData { + voter_id: local_id, + prevote_timer: Box::pin(prevote_timer.map(Ok)), + precommit_timer: Box::pin(precommit_timer.map(Ok)), + incoming, + outgoing, + } + } + + fn proposed( + &self, + round: RoundNumber, + propose: PrimaryPropose, + ) -> Result<(), Self::Error> { + let local_id = match self.voter_set_state.voting_on(round) { + Some(id) => id, + None => return Ok(()), + }; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_propose() { + // we've already proposed in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None) + } + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes(local_id, Vote::Propose(propose)); + + let set_state = VoterSetState::::Live { + completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn prevoted( + &self, + round: RoundNumber, + prevote: Prevote, + ) -> Result<(), Self::Error> { + let local_id = match self.voter_set_state.voting_on(round) { + Some(id) => id, + None => return Ok(()), + }; + + let report_prevote_metrics = |prevote: &Prevote| { + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.prevote_issued"; + "round" => round, + "target_number" => ?prevote.target_number, + "target_hash" => ?prevote.target_hash, + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics.finality_grandpa_prevotes.inc(); + } + }; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_prevote() { + // we've already prevoted in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None) + } + + // report to telemetry and prometheus + report_prevote_metrics(&prevote); + + let propose = current_round.propose(); + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes(local_id, Vote::Prevote(propose.cloned(), prevote)); + + let set_state = VoterSetState::::Live { + completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn precommitted( + &self, + round: RoundNumber, + precommit: Precommit, + ) -> Result<(), Self::Error> { + let local_id = match self.voter_set_state.voting_on(round) { + Some(id) => id, + None => return Ok(()), + }; + + let report_precommit_metrics = |precommit: &Precommit| { + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.precommit_issued"; + "round" => round, + "target_number" => ?precommit.target_number, + "target_hash" => ?precommit.target_hash, + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics.finality_grandpa_precommits.inc(); + } + 
}; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_precommit() { + // we've already precommitted in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None) + } + + // report to telemetry and prometheus + report_precommit_metrics(&precommit); + + let propose = current_round.propose(); + let prevote = match current_round { + HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, + _ => { + let msg = "Voter precommitting before prevoting."; + return Err(Error::Safety(msg.to_string())) + }, + }; + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes( + local_id, + Vote::Precommit(propose.cloned(), prevote.clone(), precommit), + ); + + let set_state = VoterSetState::::Live { + completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn completed( + &self, + round: RoundNumber, + state: RoundState>, + base: (Block::Hash, NumberFor), + historical_votes: &HistoricalVotes, + ) -> Result<(), Self::Error> { + debug!( + target: LOG_TARGET, + "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + self.config.name(), + round, + self.set_id, + state.estimate.as_ref().map(|e| e.1), + state.finalized.as_ref().map(|e| e.1), + ); + + self.update_voter_set_state(|voter_set_state| { + // NOTE: we don't use `with_current_round` here, it is possible that + // we are not currently tracking this round if it is a round we + // caught up to. + let (completed_rounds, current_rounds) = + if let VoterSetState::Live { completed_rounds, current_rounds } = voter_set_state { + (completed_rounds, current_rounds) + } else { + let msg = "Voter acting while in paused state."; + return Err(Error::Safety(msg.to_string())) + }; + + let mut completed_rounds = completed_rounds.clone(); + + // TODO: Future integration will store the prevote and precommit index. See #2611. + let votes = historical_votes.seen().to_vec(); + + completed_rounds.push(CompletedRound { + number: round, + state: state.clone(), + base, + votes, + }); + + // remove the round from live rounds and start tracking the next round + let mut current_rounds = current_rounds.clone(); + current_rounds.remove(&round); + + // NOTE: this entry should always exist as GRANDPA rounds are always + // started in increasing order, still it's better to play it safe. + current_rounds.entry(round + 1).or_insert(HasVoted::No); + + let set_state = VoterSetState::::Live { completed_rounds, current_rounds }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + // clear any cached local authority id associated with this round + self.voter_set_state.finished_voting_on(round); + + Ok(()) + } + + fn concluded( + &self, + round: RoundNumber, + state: RoundState>, + _base: (Block::Hash, NumberFor), + historical_votes: &HistoricalVotes, + ) -> Result<(), Self::Error> { + debug!( + target: LOG_TARGET, + "Voter {} concluded round {} in set {}. 
Estimate = {:?}, Finalized in round = {:?}", + self.config.name(), + round, + self.set_id, + state.estimate.as_ref().map(|e| e.1), + state.finalized.as_ref().map(|e| e.1), + ); + + self.update_voter_set_state(|voter_set_state| { + // NOTE: we don't use `with_current_round` here, because a concluded + // round is completed and cannot be current. + let (completed_rounds, current_rounds) = + if let VoterSetState::Live { completed_rounds, current_rounds } = voter_set_state { + (completed_rounds, current_rounds) + } else { + let msg = "Voter acting while in paused state."; + return Err(Error::Safety(msg.to_string())) + }; + + let mut completed_rounds = completed_rounds.clone(); + + if let Some(already_completed) = + completed_rounds.rounds.iter_mut().find(|r| r.number == round) + { + let n_existing_votes = already_completed.votes.len(); + + // the interface of Environment guarantees that the previous `historical_votes` + // from `completable` is a prefix of what is passed to `concluded`. + already_completed + .votes + .extend(historical_votes.seen().iter().skip(n_existing_votes).cloned()); + already_completed.state = state; + crate::aux_schema::write_concluded_round(&*self.client, already_completed)?; + } + + let set_state = VoterSetState::::Live { + completed_rounds, + current_rounds: current_rounds.clone(), + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn finalize_block( + &self, + hash: Block::Hash, + number: NumberFor, + round: RoundNumber, + commit: Commit, + ) -> Result<(), Self::Error> { + finalize_block( + self.client.clone(), + &self.authority_set, + Some(self.config.justification_generation_period), + hash, + number, + (round, commit).into(), + false, + self.justification_sender.as_ref(), + self.telemetry.clone(), + ) + } + + fn round_commit_timer(&self) -> Self::Timer { + use rand::{thread_rng, Rng}; + + // random between `[0, 2 * gossip_duration]` seconds. 
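+		// the delay below is drawn in milliseconds, i.e. anywhere from zero up
+		// to twice the configured gossip duration.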
+ let delay: u64 = + thread_rng().gen_range(0..2 * self.config.gossip_duration.as_millis() as u64); + Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) + } + + fn prevote_equivocation( + &self, + _round: RoundNumber, + equivocation: finality_grandpa::Equivocation< + Self::Id, + Prevote, + Self::Signature, + >, + ) { + warn!( + target: LOG_TARGET, + "Detected prevote equivocation in the finality worker: {:?}", equivocation + ); + if let Err(err) = self.report_equivocation(equivocation.into()) { + warn!(target: LOG_TARGET, "Error reporting prevote equivocation: {}", err); + } + } + + fn precommit_equivocation( + &self, + _round: RoundNumber, + equivocation: finality_grandpa::Equivocation< + Self::Id, + Precommit, + Self::Signature, + >, + ) { + warn!( + target: LOG_TARGET, + "Detected precommit equivocation in the finality worker: {:?}", equivocation + ); + if let Err(err) = self.report_equivocation(equivocation.into()) { + warn!(target: LOG_TARGET, "Error reporting precommit equivocation: {}", err); + } + } +} + +pub(crate) enum JustificationOrCommit { + Justification(GrandpaJustification), + Commit((RoundNumber, Commit)), +} + +impl From<(RoundNumber, Commit)> for JustificationOrCommit { + fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { + JustificationOrCommit::Commit(commit) + } +} + +impl From> for JustificationOrCommit { + fn from(justification: GrandpaJustification) -> JustificationOrCommit { + JustificationOrCommit::Justification(justification) + } +} + +async fn best_chain_containing( + block: Block::Hash, + client: Arc, + authority_set: SharedAuthoritySet>, + select_chain: SelectChain, + voting_rule: VotingRule, +) -> Result)>, Error> +where + Backend: BackendT, + Block: BlockT, + Client: ClientForGrandpa, + SelectChain: SelectChainT + 'static, + VotingRule: VotingRuleT, +{ + let base_header = match client.header(block)? { + Some(h) => h, + None => { + warn!( + target: LOG_TARGET, + "Encountered error finding best chain containing {:?}: couldn't find base block", + block, + ); + + return Ok(None) + }, + }; + + // we refuse to vote beyond the current limit number where transitions are scheduled to occur. + // once blocks are finalized that make that transition irrelevant or activate it, we will + // proceed onwards. most of the time there will be no pending transition. the limit, if any, is + // guaranteed to be higher than or equal to the given base number. + let limit = authority_set.current_limit(*base_header.number()); + debug!( + target: LOG_TARGET, + "Finding best chain containing block {:?} with number limit {:?}", block, limit + ); + + let mut target_header = match select_chain.finality_target(block, None).await { + Ok(target_hash) => client + .header(target_hash)? + .expect("Header known to exist after `finality_target` call; qed"), + Err(err) => { + warn!( + target: LOG_TARGET, + "Encountered error finding best chain containing {:?}: couldn't find target block: {}", + block, + err, + ); + + return Ok(None) + }, + }; + + // NOTE: this is purposefully done after `finality_target` to prevent a case + // where in-between these two requests there is a block import and + // `finality_target` returns something higher than `best_chain`. 
+ let mut best_header = match select_chain.best_chain().await { + Ok(best_header) => best_header, + Err(err) => { + warn!( + target: LOG_TARGET, + "Encountered error finding best chain containing {:?}: couldn't find best block: {}", + block, + err, + ); + + return Ok(None) + }, + }; + + let is_descendent_of = is_descendent_of(&*client, None); + + if target_header.number() > best_header.number() || + target_header.number() == best_header.number() && + target_header.hash() != best_header.hash() || + !is_descendent_of(&target_header.hash(), &best_header.hash())? + { + debug!( + target: LOG_TARGET, + "SelectChain returned a finality target inconsistent with its best block. Restricting best block to target block" + ); + + best_header = target_header.clone(); + } + + debug!( + target: LOG_TARGET, + "SelectChain: finality target: #{} ({}), best block: #{} ({})", + target_header.number(), + target_header.hash(), + best_header.number(), + best_header.hash(), + ); + + // check if our vote is currently being limited due to a pending change, + // in which case we will restrict our target header to the given limit + if let Some(target_number) = limit.filter(|limit| limit < target_header.number()) { + // walk backwards until we find the target block + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + break + } + + target_header = client + .header(*target_header.parent_hash())? + .expect("Header known to exist after `finality_target` call; qed"); + } + + debug!( + target: LOG_TARGET, + "Finality target restricted to #{} ({}) due to pending authority set change", + target_header.number(), + target_header.hash() + ) + } + + // restrict vote according to the given voting rule, if the voting rule + // doesn't restrict the vote then we keep the previous target. + // + // we also make sure that the restricted vote is higher than the round base + // (i.e. last finalized), otherwise the value returned by the given voting + // rule is ignored and the original target is used instead. + Ok(voting_rule + .restrict_vote(client.clone(), &base_header, &best_header, &target_header) + .await + .filter(|(_, restricted_number)| { + // we can only restrict votes within the interval [base, target] + restricted_number >= base_header.number() && restricted_number < target_header.number() + }) + .or_else(|| Some((target_header.hash(), *target_header.number())))) +} + +/// Whether we should process a justification for the given block. +/// +/// This can be used to decide whether to import a justification (when +/// importing a block), or whether to generate a justification from a +/// commit (when validating). Justifications for blocks that change the +/// authority set will always be processed, otherwise we'll only process +/// justifications if the last one was `justification_period` blocks ago. 
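+///
+/// As a rough illustration of the check below (assuming a hypothetical period
+/// of 512 blocks): with the last finalized block at #700, finalizing #1000
+/// processes no justification since 700 / 512 == 1000 / 512 == 1, whereas
+/// finalizing #1024 does since 1024 / 512 == 2. Justifications are also always
+/// processed while the last finalized number is still zero, or when the block
+/// enacts an authority set change.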
+pub(crate) fn should_process_justification( + client: &Client, + justification_period: u32, + number: NumberFor, + enacts_change: bool, +) -> bool +where + Block: BlockT, + BE: BackendT, + Client: ClientForGrandpa, +{ + if enacts_change { + return true + } + + let last_finalized_number = client.info().finalized_number; + + // keep the first justification before reaching the justification period + if last_finalized_number.is_zero() { + return true + } + + last_finalized_number / justification_period.into() != number / justification_period.into() +} + +/// Finalize the given block and apply any authority set changes. If an +/// authority set change is enacted then a justification is created (if not +/// given) and stored with the block when finalizing it. +/// This method assumes that the block being finalized has already been imported. +pub(crate) fn finalize_block( + client: Arc, + authority_set: &SharedAuthoritySet>, + justification_generation_period: Option, + hash: Block::Hash, + number: NumberFor, + justification_or_commit: JustificationOrCommit, + initial_sync: bool, + justification_sender: Option<&GrandpaJustificationSender>, + telemetry: Option, +) -> Result<(), CommandOrError>> +where + Block: BlockT, + BE: BackendT, + Client: ClientForGrandpa, +{ + // NOTE: lock must be held through writing to DB to avoid race. this lock + // also implicitly synchronizes the check for last finalized number + // below. + let mut authority_set = authority_set.inner(); + + let status = client.info(); + + if number <= status.finalized_number && client.hash(number)? == Some(hash) { + // This can happen after a forced change (triggered manually from the runtime when + // finality is stalled), since the voter will be restarted at the median last finalized + // block, which can be lower than the local best finalized block. + warn!(target: LOG_TARGET, "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", + hash, + number, + status.finalized_number, + ); + + return Ok(()) + } + + // FIXME #1483: clone only when changed + let old_authority_set = authority_set.clone(); + + let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { + let status = authority_set + .apply_standard_changes( + hash, + number, + &is_descendent_of::(&*client, None), + initial_sync, + None, + ) + .map_err(|e| Error::Safety(e.to_string()))?; + + // send a justification notification if a sender exists and in case of error log it. + fn notify_justification( + justification_sender: Option<&GrandpaJustificationSender>, + justification: impl FnOnce() -> Result, Error>, + ) { + if let Some(sender) = justification_sender { + if let Err(err) = sender.notify(justification) { + warn!( + target: LOG_TARGET, + "Error creating justification for subscriber: {}", err + ); + } + } + } + + // NOTE: this code assumes that honest voters will never vote past a + // transition block, thus we don't have to worry about the case where + // we have a transition with `effective_block = N`, but we finalize + // `N+1`. this assumption is required to make sure we store + // justifications for transition blocks which will be requested by + // syncing clients. 
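+		// a justification is persisted either because the commit enacts an
+		// authority set change, or because the (optional) generation period
+		// check in `should_process_justification` requires one for this block.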
+ let (justification_required, justification) = match justification_or_commit { + JustificationOrCommit::Justification(justification) => (true, justification), + JustificationOrCommit::Commit((round_number, commit)) => { + let enacts_change = status.new_set_block.is_some(); + + let justification_required = justification_generation_period + .map(|period| { + should_process_justification(&*client, period, number, enacts_change) + }) + .unwrap_or(enacts_change); + + let justification = + GrandpaJustification::from_commit(&client, round_number, commit)?; + + (justification_required, justification) + }, + }; + + notify_justification(justification_sender, || Ok(justification.clone())); + + let persisted_justification = if justification_required { + Some((GRANDPA_ENGINE_ID, justification.encode())) + } else { + None + }; + + // ideally some handle to a synchronization oracle would be used + // to avoid unconditionally notifying. + client + .apply_finality(import_op, hash, persisted_justification, true) + .map_err(|e| { + warn!( + target: LOG_TARGET, + "Error applying finality to block {:?}: {}", + (hash, number), + e + ); + e + })?; + + debug!(target: LOG_TARGET, "Finalizing blocks up to ({:?}, {})", number, hash); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.finalized_blocks_up_to"; + "number" => ?number, "hash" => ?hash, + ); + + crate::aux_schema::update_best_justification(&justification, |insert| { + apply_aux(import_op, insert, &[]) + })?; + + let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { + // the authority set has changed. + let (new_id, set_ref) = authority_set.current(); + + if set_ref.len() > 16 { + grandpa_log!( + initial_sync, + "👴 Applying GRANDPA set change to new set with {} authorities", + set_ref.len(), + ); + } else { + grandpa_log!( + initial_sync, + "👴 Applying GRANDPA set change to new set {:?}", + set_ref + ); + } + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.generating_new_authority_set"; + "number" => ?canon_number, "hash" => ?canon_hash, + "authorities" => ?set_ref.to_vec(), + "set_id" => ?new_id, + ); + Some(NewAuthoritySet { + canon_hash, + canon_number, + set_id: new_id, + authorities: set_ref.to_vec(), + }) + } else { + None + }; + + if status.changed { + let write_result = crate::aux_schema::update_authority_set::( + &authority_set, + new_authorities.as_ref(), + |insert| apply_aux(import_op, insert, &[]), + ); + + if let Err(e) = write_result { + warn!( + target: LOG_TARGET, + "Failed to write updated authority set to disk. Bailing." + ); + warn!(target: LOG_TARGET, "Node is in a potentially inconsistent state."); + + return Err(e.into()) + } + } + + Ok(new_authorities.map(VoterCommand::ChangeAuthorities)) + }); + + match update_res { + Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), + Ok(None) => Ok(()), + Err(e) => { + *authority_set = old_authority_set; + + Err(CommandOrError::Error(e)) + }, + } +} diff --git a/substrate/client/consensus/grandpa/src/finality_proof.rs b/substrate/client/consensus/grandpa/src/finality_proof.rs new file mode 100644 index 00000000..8a8a6885 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/finality_proof.rs @@ -0,0 +1,591 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! GRANDPA block finality proof generation and check. +//! +//! Finality of block B is proved by providing: +//! 1) the justification for the descendant block F; +//! 2) headers sub-chain (B; F] if B != F; +//! 3) proof of GRANDPA::authorities() if the set changes at block F. +//! +//! Since earliest possible justification is returned, the GRANDPA authorities set +//! at the block F is guaranteed to be the same as in the block B (this is because block +//! that enacts new GRANDPA authorities set always comes with justification). It also +//! means that the `set_id` is the same at blocks B and F. +//! +//! Let U be the last finalized block known to caller. If authorities set has changed several +//! times in the (U; F] interval, multiple finality proof fragments are returned (one for each +//! authority set change) and they must be verified in-order. +//! +//! Finality proof provider can choose how to provide finality proof on its own. The incomplete +//! finality proof (that finalizes some block C that is ancestor of the B and descendant +//! of the U) could be returned. + +use log::{trace, warn}; +use std::sync::Arc; + +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::backend::Backend; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_consensus_grandpa::GRANDPA_ENGINE_ID; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, +}; + +use crate::{ + authorities::{AuthoritySetChangeId, AuthoritySetChanges}, + best_justification, + justification::GrandpaJustification, + SharedAuthoritySet, LOG_TARGET, +}; + +const MAX_UNKNOWN_HEADERS: usize = 100_000; + +/// Finality proof provider for serving network requests. +#[derive(Clone)] +pub struct FinalityProofProvider { + backend: Arc, + shared_authority_set: Option>>, +} + +impl FinalityProofProvider +where + Block: BlockT, + B: Backend, +{ + /// Create new finality proof provider using: + /// + /// - backend for accessing blockchain data; + /// - authority_provider for calling and proving runtime methods. + /// - shared_authority_set for accessing authority set data + pub fn new( + backend: Arc, + shared_authority_set: Option>>, + ) -> Self { + FinalityProofProvider { backend, shared_authority_set } + } + + /// Create new finality proof provider for the service using: + /// + /// - backend for accessing blockchain data; + /// - storage_provider, which is generally a client. 
+ /// - shared_authority_set for accessing authority set data + pub fn new_for_service( + backend: Arc, + shared_authority_set: Option>>, + ) -> Arc { + Arc::new(Self::new(backend, shared_authority_set)) + } +} + +impl FinalityProofProvider +where + Block: BlockT, + B: Backend, +{ + /// Prove finality for the given block number by returning a Justification for the last block of + /// the authority set in bytes. + pub fn prove_finality( + &self, + block: NumberFor, + ) -> Result>, FinalityProofError> { + Ok(self.prove_finality_proof(block, true)?.map(|proof| proof.encode())) + } + + /// Prove finality for the given block number by returning a Justification for the last block of + /// the authority set. + /// + /// If `collect_unknown_headers` is true, the finality proof will include all headers from the + /// requested block until the block the justification refers to. + pub fn prove_finality_proof( + &self, + block: NumberFor, + collect_unknown_headers: bool, + ) -> Result>, FinalityProofError> { + let authority_set_changes = if let Some(changes) = self + .shared_authority_set + .as_ref() + .map(SharedAuthoritySet::authority_set_changes) + { + changes + } else { + return Ok(None) + }; + + prove_finality(&*self.backend, authority_set_changes, block, collect_unknown_headers) + } +} + +/// Finality for block B is proved by providing: +/// 1) the justification for the descendant block F; +/// 2) headers sub-chain (B; F] if B != F; +#[derive(Debug, PartialEq, Encode, Decode, Clone)] +pub struct FinalityProof { + /// The hash of block F for which justification is provided. + pub block: Header::Hash, + /// Justification of the block F. + pub justification: Vec, + /// The set of headers in the range (B; F] that we believe are unknown to the caller. Ordered. + pub unknown_headers: Vec
, +} + +/// Errors occurring when trying to prove finality +#[derive(Debug, thiserror::Error)] +pub enum FinalityProofError { + /// The requested block has not yet been finalized. + #[error("Block not yet finalized")] + BlockNotYetFinalized, + /// The requested block is not covered by authority set changes. Likely this means the block is + /// in the latest authority set, and the subscription API is more appropriate. + #[error("Block not covered by authority set changes")] + BlockNotInAuthoritySetChanges, + /// Errors originating from the client. + #[error(transparent)] + Client(#[from] sp_blockchain::Error), +} + +/// Prove finality for the given block number by returning a justification for the last block of +/// the authority set of which the given block is part of, or a justification for the latest +/// finalized block if the given block is part of the current authority set. +/// +/// If `collect_unknown_headers` is true, the finality proof will include all headers from the +/// requested block until the block the justification refers to. +fn prove_finality( + backend: &B, + authority_set_changes: AuthoritySetChanges>, + block: NumberFor, + collect_unknown_headers: bool, +) -> Result>, FinalityProofError> +where + Block: BlockT, + B: Backend, +{ + // Early-return if we are sure that there are no blocks finalized that cover the requested + // block. + let finalized_number = backend.blockchain().info().finalized_number; + if finalized_number < block { + let err = format!( + "Requested finality proof for descendant of #{} while we only have finalized #{}.", + block, finalized_number, + ); + trace!(target: LOG_TARGET, "{}", &err); + return Err(FinalityProofError::BlockNotYetFinalized) + } + + let (justification, just_block) = match authority_set_changes.get_set_id(block) { + AuthoritySetChangeId::Latest => { + if let Some(justification) = best_justification(backend)? + .map(|j: GrandpaJustification| (j.encode(), j.target().0)) + { + justification + } else { + trace!( + target: LOG_TARGET, + "No justification found for the latest finalized block. \ + Returning empty proof.", + ); + return Ok(None) + } + }, + AuthoritySetChangeId::Set(_, last_block_for_set) => { + let last_block_for_set_id = backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(last_block_for_set))?; + let justification = if let Some(grandpa_justification) = backend + .blockchain() + .justifications(last_block_for_set_id)? + .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) + { + grandpa_justification + } else { + trace!( + target: LOG_TARGET, + "No justification found when making finality proof for {}. \ + Returning empty proof.", + block, + ); + return Ok(None) + }; + (justification, last_block_for_set) + }, + AuthoritySetChangeId::Unknown => { + warn!( + target: LOG_TARGET, + "AuthoritySetChanges does not cover the requested block #{} due to missing data. 
\ + You need to resync to populate AuthoritySetChanges properly.", + block, + ); + return Err(FinalityProofError::BlockNotInAuthoritySetChanges) + }, + }; + + let mut headers = Vec::new(); + if collect_unknown_headers { + // Collect all headers from the requested block until the last block of the set + let mut current = block + One::one(); + loop { + if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { + break + } + let hash = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(current))?; + headers.push(backend.blockchain().expect_header(hash)?); + current += One::one(); + } + }; + + Ok(Some(FinalityProof { + block: backend.blockchain().expect_block_hash_from_id(&BlockId::Number(just_block))?, + justification, + unknown_headers: headers, + })) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId}; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::{apply_aux, LockImportRun}; + use sp_consensus::BlockOrigin; + use sp_consensus_grandpa::GRANDPA_ENGINE_ID as ID; + use sp_core::crypto::UncheckedFrom; + use sp_keyring::Ed25519Keyring; + use substrate_test_runtime_client::{ + runtime::{Block, Header, H256}, + Backend as TestBackend, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClient, TestClientBuilder, TestClientBuilderExt, + }; + + /// Check GRANDPA proof-of-finality for the given block. + /// + /// Returns the vector of headers that MUST be validated + imported + /// AND if at least one of those headers is invalid, all other MUST be considered invalid. + fn check_finality_proof( + current_set_id: SetId, + current_authorities: sp_consensus_grandpa::AuthorityList, + remote_proof: Vec, + ) -> sp_blockchain::Result> + where + NumberFor: BlockNumberOps, + { + let proof = super::FinalityProof::::decode(&mut &remote_proof[..]) + .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; + + let justification: GrandpaJustification = + Decode::decode(&mut &proof.justification[..]) + .map_err(|_| ClientError::JustificationDecode)?; + + justification.verify(current_set_id, ¤t_authorities)?; + + Ok(proof) + } + + pub(crate) type FinalityProof = super::FinalityProof
; + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) + } + + fn test_blockchain( + number_of_blocks: u64, + to_finalize: &[u64], + ) -> (Arc, Arc, Vec) { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let mut blocks = Vec::new(); + for _ in 0..number_of_blocks { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block); + } + + for block in to_finalize { + let hash = blocks[*block as usize - 1].hash(); + client.finalize_block(hash, None).unwrap(); + } + (client, backend, blocks) + } + + fn store_best_justification(client: &TestClient, just: &GrandpaJustification) { + client + .lock_import_and_run(|import_op| { + crate::aux_schema::update_best_justification(just, |insert| { + apply_aux(import_op, insert, &[]) + }) + }) + .unwrap(); + } + + #[test] + fn finality_proof_fails_if_no_more_last_finalized_blocks() { + let (_, backend, _) = test_blockchain(6, &[4]); + let authority_set_changes = AuthoritySetChanges::empty(); + + // The last finalized block is 4, so we cannot provide further justifications. + let proof_of_5 = prove_finality(&*backend, authority_set_changes, 5, true); + assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotYetFinalized))); + } + + #[test] + fn finality_proof_is_none_if_no_justification_known() { + let (_, backend, _) = test_blockchain(6, &[4]); + + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 4); + + // Block 4 is finalized without justification + // => we can't prove finality of 3 + let proof_of_3 = prove_finality(&*backend, authority_set_changes, 3, true).unwrap(); + assert_eq!(proof_of_3, None); + } + + #[test] + fn finality_proof_check_fails_when_proof_decode_fails() { + // When we can't decode proof from Vec + check_finality_proof::( + 1, + vec![(UncheckedFrom::unchecked_from([3u8; 32]), 1u64)], + vec![42], + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_fails_when_proof_is_empty() { + // When decoded proof has zero length + check_finality_proof::( + 1, + vec![(UncheckedFrom::unchecked_from([3u8; 32]), 1u64)], + Vec::>::new().encode(), + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_fails_with_incomplete_justification() { + let (_, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + // Create a commit without precommits + let commit = finality_grandpa::Commit { + target_hash: blocks[7].hash(), + target_number: *blocks[7].header().number(), + precommits: Vec::new(), + }; + + let grandpa_just: GrandpaJustification = + sp_consensus_grandpa::GrandpaJustification::
{ + round: 8, + votes_ancestries: Vec::new(), + commit, + } + .into(); + + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: grandpa_just.encode(), + unknown_headers: Vec::new(), + }; + + check_finality_proof::( + 1, + vec![(UncheckedFrom::unchecked_from([3u8; 32]), 1u64)], + finality_proof.encode(), + ) + .unwrap_err(); + } + + fn create_commit( + block: Block, + round: u64, + set_id: SetId, + auth: &[Ed25519Keyring], + ) -> finality_grandpa::Commit + where + Id: From, + S: From, + { + let mut precommits = Vec::new(); + + for voter in auth { + let precommit = finality_grandpa::Precommit { + target_hash: block.hash(), + target_number: *block.header().number(), + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_consensus_grandpa::localized_payload(round, set_id, &msg); + let signature = voter.sign(&encoded[..]).into(); + + let signed_precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: voter.public().into(), + }; + precommits.push(signed_precommit); + } + + finality_grandpa::Commit { + target_hash: block.hash(), + target_number: *block.header().number(), + precommits, + } + } + + #[test] + fn finality_proof_check_works_with_correct_justification() { + let (client, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + let alice = Ed25519Keyring::Alice; + let set_id = 1; + let round = 8; + let commit = create_commit(blocks[7].clone(), round, set_id, &[alice]); + let grandpa_just = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: grandpa_just.encode(), + unknown_headers: Vec::new(), + }; + assert_eq!( + finality_proof, + check_finality_proof::( + set_id, + vec![(alice.public().into(), 1u64)], + finality_proof.encode(), + ) + .unwrap(), + ); + } + + #[test] + fn finality_proof_using_authority_set_changes_fails_with_undefined_start() { + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); + + // We have stored the correct block number for the relevant set, but as we are missing the + // block for the preceding set the start is not well-defined. 
+ let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 8); + + let proof_of_6 = prove_finality(&*backend, authority_set_changes, 6, true); + assert!(matches!(proof_of_6, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); + } + + #[test] + fn finality_proof_using_authority_set_changes_works() { + let (client, backend, blocks) = test_blockchain(8, &[4, 5]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; + + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + + client + .finalize_block(block8.hash(), Some((ID, grandpa_just8.encode().clone()))) + .unwrap(); + + // Authority set change at block 8, so the justification stored there will be used in the + // FinalityProof for block 6 + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + authority_set_changes.append(1, 8); + + let proof_of_6: FinalityProof = + prove_finality(&*backend, authority_set_changes.clone(), 6, true) + .unwrap() + .unwrap(); + + assert_eq!( + proof_of_6, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], + }, + ); + + let proof_of_6_without_unknown: FinalityProof = + prove_finality(&*backend, authority_set_changes.clone(), 6, false) + .unwrap() + .unwrap(); + + assert_eq!( + proof_of_6_without_unknown, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![], + }, + ); + } + + #[test] + fn finality_proof_in_last_set_fails_without_latest() { + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); + + // No recent authority set change, so we are in the latest set, and we will try to pickup + // the best stored justification, for which there is none in this case. + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + + assert!(matches!(prove_finality(&*backend, authority_set_changes, 6, true), Ok(None))); + } + + #[test] + fn finality_proof_in_last_set_using_latest_justification_works() { + let (client, backend, blocks) = test_blockchain(8, &[4, 5, 8]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; + + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + store_best_justification(&client, &grandpa_just8); + + // No recent authority set change, so we are in the latest set, and will pickup the best + // stored justification + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + + let proof_of_6: FinalityProof = + prove_finality(&*backend, authority_set_changes, 6, true).unwrap().unwrap(); + + assert_eq!( + proof_of_6, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], + } + ); + } +} diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs new file mode 100644 index 00000000..760cb2da --- /dev/null +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -0,0 +1,849 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; + +use log::debug; +use parity_scale_codec::Decode; + +use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sc_consensus::{ + shared_data::{SharedDataLocked, SharedDataLockedUpgradable}, + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, +}; +use sc_telemetry::TelemetryHandle; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_api::{Core, RuntimeApiInfo, TransactionFor}; +use sp_blockchain::BlockStatus; +use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; +use sp_consensus_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; +use sp_core::hashing::twox_128; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, + Justification, +}; + +use crate::{ + authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}, + environment, + justification::GrandpaJustification, + notification::GrandpaJustificationSender, + AuthoritySetChanges, ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, + LOG_TARGET, +}; + +/// A block-import handler for GRANDPA. +/// +/// This scans each imported block for signals of changing authority set. +/// If the block being imported enacts an authority set change then: +/// - If the current authority set is still live: we import the block +/// - Otherwise, the block must include a valid justification. +/// +/// When using GRANDPA, the block import worker should be using this block import +/// object. 
+pub struct GrandpaBlockImport { + inner: Arc, + justification_import_period: u32, + select_chain: SC, + authority_set: SharedAuthoritySet>, + send_voter_commands: TracingUnboundedSender>>, + authority_set_hard_forks: HashMap>>, + justification_sender: GrandpaJustificationSender, + telemetry: Option, + _phantom: PhantomData, +} + +impl Clone + for GrandpaBlockImport +{ + fn clone(&self) -> Self { + GrandpaBlockImport { + inner: self.inner.clone(), + justification_import_period: self.justification_import_period, + select_chain: self.select_chain.clone(), + authority_set: self.authority_set.clone(), + send_voter_commands: self.send_voter_commands.clone(), + authority_set_hard_forks: self.authority_set_hard_forks.clone(), + justification_sender: self.justification_sender.clone(), + telemetry: self.telemetry.clone(), + _phantom: PhantomData, + } + } +} + +#[async_trait::async_trait] +impl JustificationImport + for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + BE: Backend, + Client: ClientForGrandpa, + SC: SelectChain, +{ + type Error = ConsensusError; + + async fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + let mut out = Vec::new(); + let chain_info = self.inner.info(); + + // request justifications for all pending changes for which change blocks have already been + // imported + let pending_changes: Vec<_> = + self.authority_set.inner().pending_changes().cloned().collect(); + + for pending_change in pending_changes { + if pending_change.delay_kind == DelayKind::Finalized && + pending_change.effective_number() > chain_info.finalized_number && + pending_change.effective_number() <= chain_info.best_number + { + let effective_block_hash = if !pending_change.delay.is_zero() { + self.select_chain + .finality_target( + pending_change.canon_hash, + Some(pending_change.effective_number()), + ) + .await + } else { + Ok(pending_change.canon_hash) + }; + + if let Ok(hash) = effective_block_hash { + if let Ok(Some(header)) = self.inner.header(hash) { + if *header.number() == pending_change.effective_number() { + out.push((header.hash(), *header.number())); + } + } + } + } + } + + out + } + + async fn import_justification( + &mut self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + // this justification was requested by the sync service, therefore we + // are not sure if it should enact a change or not. it could have been a + // request made as part of initial sync but that means the justification + // wasn't part of the block and was requested asynchronously, probably + // makes sense to log in that case. + GrandpaBlockImport::import_justification(self, hash, number, justification, false, false) + } +} + +enum AppliedChanges { + Standard(bool), // true if the change is ready to be applied (i.e. it's a root) + Forced(NewAuthoritySet), + None, +} + +impl AppliedChanges { + fn needs_justification(&self) -> bool { + match *self { + AppliedChanges::Standard(_) => true, + AppliedChanges::Forced(_) | AppliedChanges::None => false, + } + } +} + +struct PendingSetChanges { + just_in_case: Option<( + AuthoritySet>, + SharedDataLockedUpgradable>>, + )>, + applied_changes: AppliedChanges>, + do_pause: bool, +} + +impl PendingSetChanges { + // revert the pending set change explicitly. 
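+	// NOTE: the body is intentionally empty; consuming `self` without calling
+	// `defuse` leaves `just_in_case` set, so the `Drop` impl below restores the
+	// previous authority set.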
+ fn revert(self) {} + + fn defuse(mut self) -> (AppliedChanges>, bool) { + self.just_in_case = None; + let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + (applied_changes, self.do_pause) + } +} + +impl Drop for PendingSetChanges { + fn drop(&mut self) { + if let Some((old_set, mut authorities)) = self.just_in_case.take() { + *authorities.upgrade() = old_set; + } + } +} + +/// Checks the given header for a consensus digest signalling a **standard** scheduled change and +/// extracts it. +pub fn find_scheduled_change( + header: &B::Header, +) -> Option>> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog>| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +/// Checks the given header for a consensus digest signalling a **forced** scheduled change and +/// extracts it. +pub fn find_forced_change( + header: &B::Header, +) -> Option<(NumberFor, ScheduledChange>)> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog>| match log { + ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +impl GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + BE: Backend, + Client: ClientForGrandpa, + Client::Api: GrandpaApi, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, +{ + // check for a new authority set change. + fn check_new_change( + &self, + header: &Block::Header, + hash: Block::Hash, + ) -> Option>> { + // check for forced authority set hard forks + if let Some(change) = self.authority_set_hard_forks.get(&hash) { + return Some(change.clone()) + } + + // check for forced change. + if let Some((median_last_finalized, change)) = find_forced_change::(header) { + return Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Best { median_last_finalized }, + }) + } + + // check normal scheduled change. + let change = find_scheduled_change::(header)?; + Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Finalized, + }) + } + + fn make_authorities_changes( + &self, + block: &mut BlockImportParams>, + hash: Block::Hash, + initial_sync: bool, + ) -> Result, ConsensusError> { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. + struct InnerGuard<'a, H, N> { + old: Option>, + guard: Option>>, + } + + impl<'a, H, N> InnerGuard<'a, H, N> { + fn as_mut(&mut self) -> &mut AuthoritySet { + self.guard.as_mut().expect("only taken on deconstruction; qed") + } + + fn set_old(&mut self, old: AuthoritySet) { + if self.old.is_none() { + // ignore "newer" old changes. 
+ self.old = Some(old); + } + } + + fn consume( + mut self, + ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { + self.old + .take() + .map(|old| (old, self.guard.take().expect("only taken on deconstruction; qed"))) + } + } + + impl<'a, H, N> Drop for InnerGuard<'a, H, N> { + fn drop(&mut self) { + if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { + *guard = old; + } + } + } + + let number = *(block.header.number()); + let maybe_change = self.check_new_change(&block.header, hash); + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + let parent_hash = *block.header.parent_hash(); + let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); + + let mut guard = InnerGuard { guard: Some(self.authority_set.inner_locked()), old: None }; + + // whether to pause the old authority set -- happens after import + // of a forced change block. + let mut do_pause = false; + + // add any pending changes. + if let Some(change) = maybe_change { + let old = guard.as_mut().clone(); + guard.set_old(old); + + if let DelayKind::Best { .. } = change.delay_kind { + do_pause = true; + } + + guard + .as_mut() + .add_pending_change(change, &is_descendent_of) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + } + + let applied_changes = { + let forced_change_set = guard + .as_mut() + .apply_forced_changes( + hash, + number, + &is_descendent_of, + initial_sync, + self.telemetry.clone(), + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some((median_last_finalized_number, new_set)) = forced_change_set { + let new_authorities = { + let (set_id, new_authorities) = new_set.current(); + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + let best_finalized_number = self.inner.info().finalized_number; + let canon_number = best_finalized_number.min(median_last_finalized_number); + let canon_hash = self.inner.hash(canon_number) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .expect( + "the given block number is less or equal than the current best finalized number; \ + current best finalized number must exist in chain; qed." + ); + + NewAuthoritySet { + canon_number, + canon_hash, + set_id, + authorities: new_authorities.to_vec(), + } + }; + let old = ::std::mem::replace(guard.as_mut(), new_set); + guard.set_old(old); + + AppliedChanges::Forced(new_authorities) + } else { + let did_standard = guard + .as_mut() + .enacts_standard_change(hash, number, &is_descendent_of) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some(root) = did_standard { + AppliedChanges::Standard(root) + } else { + AppliedChanges::None + } + } + }; + + // consume the guard safely and write necessary changes. + let just_in_case = guard.consume(); + if let Some((_, ref authorities)) = just_in_case { + let authorities_change = match applied_changes { + AppliedChanges::Forced(ref new) => Some(new), + AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. 
+ AppliedChanges::None => None, + }; + + crate::aux_schema::update_authority_set::( + authorities, + authorities_change, + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, + ); + } + + let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); + + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) + } + + /// Read current set id form a given state. + fn current_set_id(&self, hash: Block::Hash) -> Result { + let runtime_version = self.inner.runtime_api().version(hash).map_err(|e| { + ConsensusError::ClientImport(format!( + "Unable to retrieve current runtime version. {}", + e + )) + })?; + + if runtime_version + .api_version(&>::ID) + .map_or(false, |v| v < 3) + { + // The new API is not supported in this runtime. Try reading directly from storage. + // This code may be removed once warp sync to an old runtime is no longer needed. + for prefix in ["GrandpaFinality", "Grandpa"] { + let k = [twox_128(prefix.as_bytes()), twox_128(b"CurrentSetId")].concat(); + if let Ok(Some(id)) = + self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) + { + if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { + return Ok(id) + } + } + } + Err(ConsensusError::ClientImport("Unable to retrieve current set id.".into())) + } else { + self.inner + .runtime_api() + .current_set_id(hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + } + } + + /// Import whole new state and reset authority set. + async fn import_state( + &mut self, + mut block: BlockImportParams>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + // Force imported state finality. + block.finalized = true; + let import_result = (&*self.inner).import_block(block).await; + match import_result { + Ok(ImportResult::Imported(aux)) => { + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. + // So we can read the authority list and set id from the state. 
+ self.authority_set_hard_forks.clear(); + let authorities = self + .inner + .runtime_api() + .grandpa_authorities(hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let set_id = self.current_set_id(hash)?; + let authority_set = AuthoritySet::new( + authorities.clone(), + set_id, + fork_tree::ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .ok_or_else(|| ConsensusError::ClientImport("Invalid authority list".into()))?; + *self.authority_set.inner_locked() = authority_set.clone(); + + crate::aux_schema::update_authority_set::( + &authority_set, + None, + |insert| self.inner.insert_aux(insert, []), + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let new_set = + NewAuthoritySet { canon_number: number, canon_hash: hash, set_id, authorities }; + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::ChangeAuthorities(new_set)); + Ok(ImportResult::Imported(aux)) + }, + Ok(r) => Ok(r), + Err(e) => Err(ConsensusError::ClientImport(e.to_string())), + } + } +} + +#[async_trait::async_trait] +impl BlockImport for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + BE: Backend, + Client: ClientForGrandpa, + Client::Api: GrandpaApi, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, + SC: Send, +{ + type Error = ConsensusError; + type Transaction = TransactionFor; + + async fn import_block( + &mut self, + mut block: BlockImportParams, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + + // early exit if block already in chain, otherwise the check for + // authority changes will error when trying to re-import a change block + match self.inner.status(hash) { + Ok(BlockStatus::InChain) => { + // Strip justifications when re-importing an existing block. + let _justifications = block.justifications.take(); + return (&*self.inner).import_block(block).await + }, + Ok(BlockStatus::Unknown) => {}, + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + + if block.with_state() { + return self.import_state(block).await + } + + if number <= self.inner.info().finalized_number { + // Importing an old block. Just save justifications and authority set changes + if self.check_new_change(&block.header, hash).is_some() { + if block.justifications.is_none() { + return Err(ConsensusError::ClientImport( + "Justification required when importing \ + an old block with authority set change." + .into(), + )) + } + assert!(block.justifications.is_some()); + let mut authority_set = self.authority_set.inner_locked(); + authority_set.authority_set_changes.insert(number); + crate::aux_schema::update_authority_set::( + &authority_set, + None, + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, + ); + } + return (&*self.inner).import_block(block).await + } + + // on initial sync we will restrict logging under info to avoid spam. 
+ let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; + + let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; + + // we don't want to finalize on `inner.import_block` + let mut justifications = block.justifications.take(); + let import_result = (&*self.inner).import_block(block).await; + + let mut imported_aux = { + match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => { + debug!( + target: LOG_TARGET, + "Restoring old authority set after block import result: {:?}", r, + ); + pending_changes.revert(); + return Ok(r) + }, + Err(e) => { + debug!( + target: LOG_TARGET, + "Restoring old authority set after block import error: {}", e, + ); + pending_changes.revert(); + return Err(ConsensusError::ClientImport(e.to_string())) + }, + } + }; + + let (applied_changes, do_pause) = pending_changes.defuse(); + + // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. + if do_pause { + let _ = self.send_voter_commands.unbounded_send(VoterCommand::Pause( + "Forced change scheduled after inactivity".to_string(), + )); + } + + let needs_justification = applied_changes.needs_justification(); + + match applied_changes { + AppliedChanges::Forced(new) => { + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). + let _ = + self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); + + // we must clear all pending justifications requests, presumably they won't be + // finalized hence why this forced changes was triggered + imported_aux.clear_justification_requests = true; + }, + AppliedChanges::Standard(false) => { + // we can't apply this change yet since there are other dependent changes that we + // need to apply first, drop any justification that might have been provided with + // the block to make sure we request them from `sync` which will ensure they'll be + // applied in-order. 
+ justifications.take(); + }, + _ => {}, + } + + let grandpa_justification = + justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); + + match grandpa_justification { + Some(justification) => { + if environment::should_process_justification( + &*self.inner, + self.justification_import_period, + number, + needs_justification, + ) { + let import_res = self.import_justification( + hash, + number, + (GRANDPA_ENGINE_ID, justification), + needs_justification, + initial_sync, + ); + + import_res.unwrap_or_else(|err| { + if needs_justification { + debug!( + target: LOG_TARGET, + "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", + number, + err + ); + imported_aux.bad_justification = true; + imported_aux.needs_justification = true; + } + }); + } else { + debug!( + target: LOG_TARGET, + "Ignoring unnecessary justification for block #{}", + number, + ); + } + }, + None => + if needs_justification { + debug!( + target: LOG_TARGET, + "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", + number, + ); + + imported_aux.needs_justification = true; + }, + } + + Ok(ImportResult::Imported(imported_aux)) + } + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).await + } +} + +impl GrandpaBlockImport { + pub(crate) fn new( + inner: Arc, + justification_import_period: u32, + select_chain: SC, + authority_set: SharedAuthoritySet>, + send_voter_commands: TracingUnboundedSender>>, + authority_set_hard_forks: Vec<(SetId, PendingChange>)>, + justification_sender: GrandpaJustificationSender, + telemetry: Option, + ) -> GrandpaBlockImport { + // check for and apply any forced authority set hard fork that applies + // to the *current* authority set. + if let Some((_, change)) = authority_set_hard_forks + .iter() + .find(|(set_id, _)| *set_id == authority_set.set_id()) + { + authority_set.inner().current_authorities = change.next_authorities.clone(); + } + + // index authority set hard forks by block hash so that they can be used + // by any node syncing the chain and importing a block hard fork + // authority set changes. + let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|(_, change)| (change.canon_hash, change)) + .collect::>(); + + // check for and apply any forced authority set hard fork that apply to + // any *pending* standard changes, checking by the block hash at which + // they were announced. + { + let mut authority_set = authority_set.inner(); + + authority_set.pending_standard_changes = + authority_set.pending_standard_changes.clone().map(&mut |hash, _, original| { + authority_set_hard_forks.get(hash).cloned().unwrap_or(original) + }); + } + + GrandpaBlockImport { + inner, + justification_import_period, + select_chain, + authority_set, + send_voter_commands, + authority_set_hard_forks, + justification_sender, + telemetry, + _phantom: PhantomData, + } + } +} + +impl GrandpaBlockImport +where + BE: Backend, + Client: ClientForGrandpa, + NumberFor: finality_grandpa::BlockNumberOps, +{ + /// Import a block justification and finalize the block. + /// + /// If `enacts_change` is set to true, then finalizing this block *must* + /// enact an authority set change, the function will panic otherwise. 
+ fn import_justification( + &mut self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + enacts_change: bool, + initial_sync: bool, + ) -> Result<(), ConsensusError> { + if justification.0 != GRANDPA_ENGINE_ID { + // TODO: the import queue needs to be refactored to be able dispatch to the correct + // `JustificationImport` instance based on `ConsensusEngineId`, or we need to build a + // justification import pipeline similar to what we do for `BlockImport`. In the + // meantime we'll just drop the justification, since this is only used for BEEFY which + // is still WIP. + return Ok(()) + } + + let justification = GrandpaJustification::decode_and_verify_finalizes( + &justification.1, + (hash, number), + self.authority_set.set_id(), + &self.authority_set.current_authorities(), + ); + + let justification = match justification { + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + Ok(justification) => justification, + }; + + let result = environment::finalize_block( + self.inner.clone(), + &self.authority_set, + None, + hash, + number, + justification.into(), + initial_sync, + Some(&self.justification_sender), + self.telemetry.clone(), + ); + + match result { + Err(CommandOrError::VoterCommand(command)) => { + grandpa_log!( + initial_sync, + "👴 Imported justification for block #{} that triggers \ + command {}, signaling voter.", + number, + command, + ); + + // send the command to the voter + let _ = self.send_voter_commands.unbounded_send(command); + }, + Err(CommandOrError::Error(e)) => + return Err(match e { + Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), + Error::Network(error) => ConsensusError::ClientImport(error), + Error::Blockchain(error) => ConsensusError::ClientImport(error), + Error::Client(error) => ConsensusError::ClientImport(error.to_string()), + Error::Safety(error) => ConsensusError::ClientImport(error), + Error::Signing(error) => ConsensusError::ClientImport(error), + Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), + Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), + }), + Ok(_) => { + assert!( + !enacts_change, + "returns Ok when no authority set change should be enacted; qed;" + ); + }, + } + + Ok(()) + } +} diff --git a/substrate/client/consensus/grandpa/src/justification.rs b/substrate/client/consensus/grandpa/src/justification.rs new file mode 100644 index 00000000..c300a3d7 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/justification.rs @@ -0,0 +1,307 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + sync::Arc, +}; + +use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; +use parity_scale_codec::{Decode, Encode}; +use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_consensus_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; + +use crate::{AuthorityList, Commit, Error}; + +/// A GRANDPA justification for block finality, it includes a commit message and +/// an ancestry proof including all headers routing all precommit target blocks +/// to the commit target block. Due to the current voting strategy the precommit +/// targets should be the same as the commit target, since honest voters don't +/// vote past authority set change blocks. +/// +/// This is meant to be stored in the db and passed around the network to other +/// nodes, and are used by syncing nodes to prove authority set handoffs. +#[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] +pub struct GrandpaJustification { + /// The GRANDPA justification for block finality. + pub justification: sp_consensus_grandpa::GrandpaJustification, + _block: PhantomData, +} + +impl From> + for GrandpaJustification +{ + fn from(justification: sp_consensus_grandpa::GrandpaJustification) -> Self { + Self { justification, _block: Default::default() } + } +} + +impl Into> + for GrandpaJustification +{ + fn into(self) -> sp_consensus_grandpa::GrandpaJustification { + self.justification + } +} + +impl GrandpaJustification { + /// Create a GRANDPA justification from the given commit. This method + /// assumes the commit is valid and well-formed. + pub fn from_commit( + client: &Arc, + round: u64, + commit: Commit, + ) -> Result + where + C: HeaderBackend, + { + let mut votes_ancestries_hashes = HashSet::new(); + let mut votes_ancestries = Vec::new(); + + let error = || { + let msg = "invalid precommits for target commit".to_string(); + Err(Error::Client(ClientError::BadJustification(msg))) + }; + + // we pick the precommit for the lowest block as the base that + // should serve as the root block for populating ancestry (i.e. + // collect all headers from all precommit blocks to the base) + let (base_hash, base_number) = match commit + .precommits + .iter() + .map(|signed| &signed.precommit) + .min_by_key(|precommit| precommit.target_number) + .map(|precommit| (precommit.target_hash, precommit.target_number)) + { + None => return error(), + Some(base) => base, + }; + + for signed in commit.precommits.iter() { + let mut current_hash = signed.precommit.target_hash; + loop { + if current_hash == base_hash { + break + } + + match client.header(current_hash)? { + Some(current_header) => { + // NOTE: this should never happen as we pick the lowest block + // as base and only traverse backwards from the other blocks + // in the commit. but better be safe to avoid an unbound loop. + if *current_header.number() <= base_number { + return error() + } + + let parent_hash = *current_header.parent_hash(); + if votes_ancestries_hashes.insert(current_hash) { + votes_ancestries.push(current_header); + } + + current_hash = parent_hash; + }, + _ => return error(), + } + } + } + + Ok(sp_consensus_grandpa::GrandpaJustification { round, commit, votes_ancestries }.into()) + } + + /// Decode a GRANDPA justification and validate the commit and the votes' + /// ancestry proofs finalize the given block. 
+ pub fn decode_and_verify_finalizes( + encoded: &[u8], + finalized_target: (Block::Hash, NumberFor), + set_id: u64, + voters: &VoterSet, + ) -> Result + where + NumberFor: finality_grandpa::BlockNumberOps, + { + let justification = GrandpaJustification::::decode(&mut &*encoded) + .map_err(|_| ClientError::JustificationDecode)?; + + if ( + justification.justification.commit.target_hash, + justification.justification.commit.target_number, + ) != finalized_target + { + let msg = "invalid commit target in grandpa justification".to_string(); + Err(ClientError::BadJustification(msg)) + } else { + justification.verify_with_voter_set(set_id, voters).map(|_| justification) + } + } + + /// Validate the commit and the votes' ancestry proofs. + pub fn verify(&self, set_id: u64, authorities: &AuthorityList) -> Result<(), ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + let voters = VoterSet::new(authorities.iter().cloned()) + .ok_or(ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet))?; + + self.verify_with_voter_set(set_id, &voters) + } + + /// Validate the commit and the votes' ancestry proofs. + pub(crate) fn verify_with_voter_set( + &self, + set_id: u64, + voters: &VoterSet, + ) -> Result<(), ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + use finality_grandpa::Chain; + + let ancestry_chain = AncestryChain::::new(&self.justification.votes_ancestries); + + match finality_grandpa::validate_commit(&self.justification.commit, voters, &ancestry_chain) + { + Ok(ref result) if result.is_valid() => {}, + _ => { + let msg = "invalid commit in grandpa justification".to_string(); + return Err(ClientError::BadJustification(msg)) + }, + } + + // we pick the precommit for the lowest block as the base that + // should serve as the root block for populating ancestry (i.e. 
+ // collect all headers from all precommit blocks to the base) + let base_hash = self + .justification + .commit + .precommits + .iter() + .map(|signed| &signed.precommit) + .min_by_key(|precommit| precommit.target_number) + .map(|precommit| precommit.target_hash) + .expect( + "can only fail if precommits is empty; \ + commit has been validated above; \ + valid commits must include precommits; \ + qed.", + ); + + let mut buf = Vec::new(); + let mut visited_hashes = HashSet::new(); + for signed in self.justification.commit.precommits.iter() { + if !sp_consensus_grandpa::check_message_signature_with_buffer( + &finality_grandpa::Message::Precommit(signed.precommit.clone()), + &signed.id, + &signed.signature, + self.justification.round, + set_id, + &mut buf, + ) { + return Err(ClientError::BadJustification( + "invalid signature for precommit in grandpa justification".to_string(), + )) + } + + if base_hash == signed.precommit.target_hash { + continue + } + + match ancestry_chain.ancestry(base_hash, signed.precommit.target_hash) { + Ok(route) => { + // ancestry starts from parent hash but the precommit target hash has been + // visited + visited_hashes.insert(signed.precommit.target_hash); + for hash in route { + visited_hashes.insert(hash); + } + }, + _ => + return Err(ClientError::BadJustification( + "invalid precommit ancestry proof in grandpa justification".to_string(), + )), + } + } + + let ancestry_hashes: HashSet<_> = self + .justification + .votes_ancestries + .iter() + .map(|h: &Block::Header| h.hash()) + .collect(); + + if visited_hashes != ancestry_hashes { + return Err(ClientError::BadJustification( + "invalid precommit ancestries in grandpa justification with unused headers" + .to_string(), + )) + } + + Ok(()) + } + + /// The target block number and hash that this justifications proves finality for. + pub fn target(&self) -> (NumberFor, Block::Hash) { + (self.justification.commit.target_number, self.justification.commit.target_hash) + } +} + +/// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. +/// This is useful when validating commits, using the given set of headers to +/// verify a valid ancestry route to the target commit block. +struct AncestryChain { + ancestry: HashMap, +} + +impl AncestryChain { + fn new(ancestry: &[Block::Header]) -> AncestryChain { + let ancestry: HashMap<_, _> = + ancestry.iter().cloned().map(|h: Block::Header| (h.hash(), h)).collect(); + + AncestryChain { ancestry } + } +} + +impl finality_grandpa::Chain> for AncestryChain +where + NumberFor: finality_grandpa::BlockNumberOps, +{ + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + let mut route = Vec::new(); + let mut current_hash = block; + loop { + if current_hash == base { + break + } + match self.ancestry.get(¤t_hash) { + Some(current_header) => { + current_hash = *current_header.parent_hash(); + route.push(current_hash); + }, + _ => return Err(GrandpaError::NotDescendent), + } + } + route.pop(); // remove the base + + Ok(route) + } +} diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs new file mode 100644 index 00000000..08417a3a --- /dev/null +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -0,0 +1,1209 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Integration of the GRANDPA finality gadget into substrate. +//! +//! This crate is unstable and the API and usage may change. +//! +//! This crate provides a long-running future that produces finality notifications. +//! +//! # Usage +//! +//! First, create a block-import wrapper with the `block_import` function. The +//! GRANDPA worker needs to be linked together with this block import object, so +//! a `LinkHalf` is returned as well. All blocks imported (from network or +//! consensus or otherwise) must pass through this wrapper, otherwise consensus +//! is likely to break in unexpected ways. +//! +//! Next, use the `LinkHalf` and a local configuration to `run_grandpa_voter`. +//! This requires a `Network` implementation. The returned future should be +//! driven to completion and will finalize blocks in the background. +//! +//! # Changing authority sets +//! +//! The rough idea behind changing authority sets in GRANDPA is that at some point, +//! we obtain agreement for some maximum block height that the current set can +//! finalize, and once a block with that height is finalized the next set will +//! pick up finalization from there. +//! +//! Technically speaking, this would be implemented as a voting rule which says, +//! "if there is a signal for a change in N blocks in block B, only vote on +//! chains with length NUM(B) + N if they contain B". This conditional-inclusion +//! logic is complex to compute because it requires looking arbitrarily far +//! back in the chain. +//! +//! Instead, we keep track of a list of all signals we've seen so far (across +//! all forks), sorted ascending by the block number they would be applied at. +//! We never vote on chains with number higher than the earliest handoff block +//! number (this is num(signal) + N). When finalizing a block, we either apply +//! or prune any signaled changes based on whether the signaling block is +//! included in the newly-finalized chain. 
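For readers unfamiliar with how these pieces fit together, the following is a minimal wiring sketch of the workflow outlined in the crate docs above (block-import wrapper, `LinkHalf`, then the voter future). It is illustrative only and not part of this patch: `client`, `select_chain`, `network`, `sync_service`, `keystore`, `role`, `prometheus_registry`, `grandpa_protocol_name` and `telemetry` are placeholders assumed to come from a typical Substrate service builder, and the numeric values are example choices.

// Hypothetical service-builder snippet; every identifier marked "assumed" below
// is provided by the surrounding node setup, not by this crate.
let justification_import_period = 512u32; // example: only import justifications every N blocks

// 1. Wrap the client's block import and obtain the voter link.
let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
    client.clone(),                                // assumed: Arc<FullClient>
    justification_import_period,
    &(client.clone() as std::sync::Arc<_>),        // genesis authority set provider
    select_chain.clone(),                          // assumed: node's SelectChain
    telemetry.clone(),
)?;
// All imported blocks (from network, consensus, etc.) must flow through
// `grandpa_block_import`, e.g. by handing it to the import queue.

// 2. Configure and start the background voter.
let grandpa_config = sc_consensus_grandpa::Config {
    gossip_duration: std::time::Duration::from_millis(1000),
    justification_generation_period: 512,          // example value
    name: None,
    observer_enabled: false,
    keystore: if role.is_authority() { Some(keystore) } else { None },
    local_role: role,
    telemetry: telemetry.clone(),
    protocol_name: grandpa_protocol_name,          // assumed: built via `protocol_standard_name`
};

let voter = sc_consensus_grandpa::run_grandpa_voter(sc_consensus_grandpa::GrandpaParams {
    config: grandpa_config,
    link: grandpa_link,
    network: network.clone(),
    sync: sync_service.clone(),
    voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
    prometheus_registry,
    shared_voter_state: sc_consensus_grandpa::SharedVoterState::empty(),
    telemetry,
})?;
// Drive `voter` to completion, e.g. by spawning it as an essential task on the
// node's task manager; if it stops, block finality stops with it.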
+ +#![warn(missing_docs)] + +use futures::{prelude::*, StreamExt}; +use log::{debug, error, info}; +use parity_scale_codec::Decode; +use parking_lot::RwLock; +use prometheus_endpoint::{PrometheusError, Registry}; +use sc_client_api::{ + backend::{AuxStore, Backend}, + utils::is_descendent_of, + BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, + StorageProvider, TransactionFor, +}; +use sc_consensus::BlockImport; +use sc_network::types::ProtocolName; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppCrypto; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; +use sp_consensus::SelectChain; +use sp_consensus_grandpa::{ + AuthorityList, AuthoritySignature, SetId, CLIENT_LOG_TARGET as LOG_TARGET, +}; +use sp_core::{crypto::ByteArray, traits::CallContext}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor, Zero}, +}; + +pub use finality_grandpa::BlockNumberOps; +use finality_grandpa::{voter, voter_set::VoterSet, Error as GrandpaError}; + +use std::{ + fmt, io, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; + +// utility logging macro that takes as first argument a conditional to +// decide whether to log under debug or info level (useful to restrict +// logging under initial sync). +macro_rules! grandpa_log { + ($condition:expr, $($msg: expr),+ $(,)?) => { + { + let log_level = if $condition { + log::Level::Debug + } else { + log::Level::Info + }; + + log::log!(target: LOG_TARGET, log_level, $($msg),+); + } + }; +} + +mod authorities; +mod aux_schema; +mod communication; +mod environment; +mod finality_proof; +mod import; +mod justification; +mod notification; +mod observer; +mod until_imported; +mod voting_rule; +pub mod warp_proof; + +pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; +pub use aux_schema::best_justification; +pub use communication::grandpa_protocol_name::standard_name as protocol_standard_name; +pub use finality_grandpa::voter::report; +pub use finality_proof::{FinalityProof, FinalityProofError, FinalityProofProvider}; +pub use import::{find_forced_change, find_scheduled_change, GrandpaBlockImport}; +pub use justification::GrandpaJustification; +pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; +pub use observer::run_grandpa_observer; +pub use voting_rule::{ + BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRuleResult, + VotingRulesBuilder, +}; + +use aux_schema::PersistentData; +use communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT}; +use environment::{Environment, VoterSetState}; +use until_imported::UntilGlobalMessageBlocksImported; + +// Re-export these two because it's just so damn convenient. +pub use sp_consensus_grandpa::{ + AuthorityId, AuthorityPair, CatchUp, Commit, CompactCommit, GrandpaApi, Message, Precommit, + Prevote, PrimaryPropose, ScheduledChange, SignedMessage, +}; +use std::marker::PhantomData; + +#[cfg(test)] +mod tests; + +/// A global communication input stream for commits and catch up messages. Not +/// exposed publicly, used internally to simplify types in the communication +/// layer. 
+type CommunicationIn = voter::CommunicationIn< + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, +>; +/// Global communication input stream for commits and catch up messages, with +/// the hash type not being derived from the block, useful for forcing the hash +/// to some type (e.g. `H256`) when the compiler can't do the inference. +type CommunicationInH = + voter::CommunicationIn, AuthoritySignature, AuthorityId>; + +/// Global communication sink for commits with the hash type not being derived +/// from the block, useful for forcing the hash to some type (e.g. `H256`) when +/// the compiler can't do the inference. +type CommunicationOutH = + voter::CommunicationOut, AuthoritySignature, AuthorityId>; + +/// Shared voter state for querying. +pub struct SharedVoterState { + inner: Arc + Sync + Send>>>>, +} + +impl SharedVoterState { + /// Create a new empty `SharedVoterState` instance. + pub fn empty() -> Self { + Self { inner: Arc::new(RwLock::new(None)) } + } + + fn reset( + &self, + voter_state: Box + Sync + Send>, + ) -> Option<()> { + let mut shared_voter_state = self.inner.try_write_for(Duration::from_secs(1))?; + + *shared_voter_state = Some(voter_state); + Some(()) + } + + /// Get the inner `VoterState` instance. + pub fn voter_state(&self) -> Option> { + self.inner.read().as_ref().map(|vs| vs.get()) + } +} + +impl Clone for SharedVoterState { + fn clone(&self) -> Self { + SharedVoterState { inner: self.inner.clone() } + } +} + +/// Configuration for the GRANDPA service +#[derive(Clone)] +pub struct Config { + /// The expected duration for a message to be gossiped across the network. + pub gossip_duration: Duration, + /// Justification generation period (in blocks). GRANDPA will try to generate + /// justifications at least every justification_generation_period blocks. There + /// are some other events which might cause justification generation. + pub justification_generation_period: u32, + /// Whether the GRANDPA observer protocol is live on the network and thereby + /// a full-node not running as a validator is running the GRANDPA observer + /// protocol (we will only issue catch-up requests to authorities when the + /// observer protocol is enabled). + pub observer_enabled: bool, + /// The role of the local node (i.e. authority, full-node or light). + pub local_role: sc_network::config::Role, + /// Some local identifier of the voter. + pub name: Option, + /// The keystore that manages the keys of this node. + pub keystore: Option, + /// TelemetryHandle instance. + pub telemetry: Option, + /// Chain specific GRANDPA protocol name. See [`crate::protocol_standard_name`]. + pub protocol_name: ProtocolName, +} + +impl Config { + fn name(&self) -> &str { + self.name.as_deref().unwrap_or("") + } +} + +/// Errors that can occur while voting in GRANDPA. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// An error within grandpa. + #[error("grandpa error: {0}")] + Grandpa(#[from] GrandpaError), + + /// A network error. + #[error("network error: {0}")] + Network(String), + + /// A blockchain error. + #[error("blockchain error: {0}")] + Blockchain(String), + + /// Could not complete a round on disk. + #[error("could not complete a round on disk: {0}")] + Client(#[from] ClientError), + + /// Could not sign outgoing message + #[error("could not sign outgoing message: {0}")] + Signing(String), + + /// An invariant has been violated (e.g. 
not finalizing pending change blocks in-order) + #[error("safety invariant has been violated: {0}")] + Safety(String), + + /// A timer failed to fire. + #[error("a timer failed to fire: {0}")] + Timer(io::Error), + + /// A runtime api request failed. + #[error("runtime API request failed: {0}")] + RuntimeApi(sp_api::ApiError), +} + +/// Something which can determine if a block is known. +pub(crate) trait BlockStatus { + /// Return `Ok(Some(number))` or `Ok(None)` depending on whether the block + /// is definitely known and has been imported. + /// If an unexpected error occurs, return that. + fn block_number(&self, hash: Block::Hash) -> Result>, Error>; +} + +impl BlockStatus for Arc +where + Client: HeaderBackend, + NumberFor: BlockNumberOps, +{ + fn block_number(&self, hash: Block::Hash) -> Result>, Error> { + self.block_number_from_id(&BlockId::Hash(hash)) + .map_err(|e| Error::Blockchain(e.to_string())) + } +} + +/// A trait that includes all the client functionalities grandpa requires. +/// Ideally this would be a trait alias, we're not there yet. +/// tracking issue +pub trait ClientForGrandpa: + LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + + BlockImport, Error = sp_consensus::Error> + + StorageProvider +where + BE: Backend, + Block: BlockT, +{ +} + +impl ClientForGrandpa for T +where + BE: Backend, + Block: BlockT, + T: LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + + BlockImport, Error = sp_consensus::Error> + + StorageProvider, +{ +} + +/// Something that one can ask to do a block sync request. +pub(crate) trait BlockSyncRequester { + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); +} + +impl BlockSyncRequester for NetworkBridge +where + Block: BlockT, + Network: NetworkT, + Syncing: SyncingT, +{ + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ) { + NetworkBridge::set_sync_fork_request(self, peers, hash, number) + } +} + +/// A new authority set along with the canonical block it changed at. +#[derive(Debug)] +pub(crate) struct NewAuthoritySet { + pub(crate) canon_number: N, + pub(crate) canon_hash: H, + pub(crate) set_id: SetId, + pub(crate) authorities: AuthorityList, +} + +/// Commands issued to the voter. +#[derive(Debug)] +pub(crate) enum VoterCommand { + /// Pause the voter for given reason. + Pause(String), + /// New authorities. + ChangeAuthorities(NewAuthoritySet), +} + +impl fmt::Display for VoterCommand { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), + VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), + } + } +} + +/// Signals either an early exit of a voter or an error. +#[derive(Debug)] +pub(crate) enum CommandOrError { + /// An error occurred. + Error(Error), + /// A command to the voter. 
+ VoterCommand(VoterCommand), +} + +impl From for CommandOrError { + fn from(e: Error) -> Self { + CommandOrError::Error(e) + } +} + +impl From for CommandOrError { + fn from(e: ClientError) -> Self { + CommandOrError::Error(Error::Client(e)) + } +} + +impl From for CommandOrError { + fn from(e: finality_grandpa::Error) -> Self { + CommandOrError::Error(Error::from(e)) + } +} + +impl From> for CommandOrError { + fn from(e: VoterCommand) -> Self { + CommandOrError::VoterCommand(e) + } +} + +impl ::std::error::Error for CommandOrError {} + +impl fmt::Display for CommandOrError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CommandOrError::Error(ref e) => write!(f, "{}", e), + CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), + } + } +} + +/// Link between the block importer and the background voter. +pub struct LinkHalf { + client: Arc, + select_chain: SC, + persistent_data: PersistentData, + voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: GrandpaJustificationSender, + justification_stream: GrandpaJustificationStream, + telemetry: Option, +} + +impl LinkHalf { + /// Get the shared authority set. + pub fn shared_authority_set(&self) -> &SharedAuthoritySet> { + &self.persistent_data.authority_set + } + + /// Get the receiving end of justification notifications. + pub fn justification_stream(&self) -> GrandpaJustificationStream { + self.justification_stream.clone() + } +} + +/// Provider for the Grandpa authority set configured on the genesis block. +pub trait GenesisAuthoritySetProvider { + /// Get the authority set at the genesis block. + fn get(&self) -> Result; +} + +impl GenesisAuthoritySetProvider for Arc +where + E: CallExecutor, + Client: ExecutorProvider + HeaderBackend, +{ + fn get(&self) -> Result { + // This implementation uses the Grandpa runtime API instead of reading directly from the + // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of + // the chain, whereas the runtime API is backwards compatible. + self.executor() + .call( + self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, + "GrandpaApi_grandpa_authorities", + &[], + ExecutionStrategy::NativeElseWasm, + CallContext::Offchain, + ) + .and_then(|call_result| { + Decode::decode(&mut &call_result[..]).map_err(|err| { + ClientError::CallResultDecode( + "failed to decode GRANDPA authorities set proof", + err, + ) + }) + }) + } +} + +/// Make block importer and link half necessary to tie the background voter +/// to it. +/// +/// The `justification_import_period` sets the minimum period on which +/// justifications will be imported. When importing a block, if it includes a +/// justification it will only be processed if it fits within this period, +/// otherwise it will be ignored (and won't be validated). This is to avoid +/// slowing down sync by a peer serving us unnecessary justifications which +/// aren't trivial to validate. +pub fn block_import( + client: Arc, + justification_import_period: u32, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + select_chain: SC, + telemetry: Option, +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> +where + SC: SelectChain, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, +{ + block_import_with_authority_set_hard_forks( + client, + justification_import_period, + genesis_authorities_provider, + select_chain, + Default::default(), + telemetry, + ) +} + +/// A descriptor for an authority set hard fork. 
These are authority set changes +/// that are not signalled by the runtime and instead are defined off-chain +/// (hence the hard fork). +pub struct AuthoritySetHardFork { + /// The new authority set id. + pub set_id: SetId, + /// The block hash and number at which the hard fork should be applied. + pub block: (Block::Hash, NumberFor), + /// The authorities in the new set. + pub authorities: AuthorityList, + /// The latest block number that was finalized before this authority set + /// hard fork. When defined, the authority set change will be forced, i.e. + /// the node won't wait for the block above to be finalized before enacting + /// the change, and the given finalized number will be used as a base for + /// voting. + pub last_finalized: Option>, +} + +/// Make block importer and link half necessary to tie the background voter to +/// it. A vector of authority set hard forks can be passed, any authority set +/// change signaled at the given block (either already signalled or in a further +/// block when importing it) will be replaced by a standard change with the +/// given static authorities. +pub fn block_import_with_authority_set_hard_forks( + client: Arc, + justification_import_period: u32, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + select_chain: SC, + authority_set_hard_forks: Vec>, + telemetry: Option, +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> +where + SC: SelectChain, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, +{ + let chain_info = client.info(); + let genesis_hash = chain_info.genesis_hash; + + let persistent_data = + aux_schema::load_persistent(&*client, genesis_hash, >::zero(), { + let telemetry = telemetry.clone(); + move || { + let authorities = genesis_authorities_provider.get()?; + telemetry!( + telemetry; + CONSENSUS_DEBUG; + "afg.loading_authorities"; + "authorities_len" => ?authorities.len() + ); + Ok(authorities) + } + })?; + + let (voter_commands_tx, voter_commands_rx) = + tracing_unbounded("mpsc_grandpa_voter_command", 100_000); + + let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); + + // create pending change objects with 0 delay for each authority set hard fork. 
+ let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|fork| { + let delay_kind = if let Some(last_finalized) = fork.last_finalized { + authorities::DelayKind::Best { median_last_finalized: last_finalized } + } else { + authorities::DelayKind::Finalized + }; + + ( + fork.set_id, + authorities::PendingChange { + next_authorities: fork.authorities, + delay: Zero::zero(), + canon_hash: fork.block.0, + canon_height: fork.block.1, + delay_kind, + }, + ) + }) + .collect(); + + Ok(( + GrandpaBlockImport::new( + client.clone(), + justification_import_period, + select_chain.clone(), + persistent_data.authority_set.clone(), + voter_commands_tx, + authority_set_hard_forks, + justification_sender.clone(), + telemetry.clone(), + ), + LinkHalf { + client, + select_chain, + persistent_data, + voter_commands_rx, + justification_sender, + justification_stream, + telemetry, + }, + )) +} + +fn global_communication( + set_id: SetId, + voters: &Arc>, + client: Arc, + network: &NetworkBridge, + keystore: Option<&KeystorePtr>, + metrics: Option, +) -> ( + impl Stream< + Item = Result< + CommunicationInH, + CommandOrError>, + >, + >, + impl Sink< + CommunicationOutH, + Error = CommandOrError>, + >, +) +where + BE: Backend + 'static, + C: ClientForGrandpa + 'static, + N: NetworkT, + S: SyncingT, + NumberFor: BlockNumberOps, +{ + let is_voter = local_authority_id(voters, keystore).is_some(); + + // verification stream + let (global_in, global_out) = + network.global_communication(communication::SetId(set_id), voters.clone(), is_voter); + + // block commit and catch up messages until relevant blocks are imported. + let global_in = UntilGlobalMessageBlocksImported::new( + client.import_notification_stream(), + network.clone(), + client.clone(), + global_in, + "global", + metrics, + ); + + let global_in = global_in.map_err(CommandOrError::from); + let global_out = global_out.sink_map_err(CommandOrError::from); + + (global_in, global_out) +} + +/// Parameters used to run Grandpa. +pub struct GrandpaParams { + /// Configuration for the GRANDPA service. + pub config: Config, + /// A link to the block import worker. + pub link: LinkHalf, + /// The Network instance. + /// + /// It is assumed that this network will feed us Grandpa notifications. When using the + /// `sc_network` crate, it is assumed that the Grandpa notifications protocol has been passed + /// to the configuration of the networking. See [`grandpa_peers_set_config`]. + pub network: N, + /// Event stream for syncing-related events. + pub sync: S, + /// A voting rule used to potentially restrict target votes. + pub voting_rule: VR, + /// The prometheus metrics registry. + pub prometheus_registry: Option, + /// The voter state is exposed at an RPC endpoint. + pub shared_voter_state: SharedVoterState, + /// TelemetryHandle instance. + pub telemetry: Option, +} + +/// Returns the configuration value to put in +/// [`sc_network::config::FullNetworkConfiguration`]. +/// For standard protocol name see [`crate::protocol_standard_name`]. +pub fn grandpa_peers_set_config( + protocol_name: ProtocolName, +) -> sc_network::config::NonDefaultSetConfig { + use communication::grandpa_protocol_name; + sc_network::config::NonDefaultSetConfig { + notifications_protocol: protocol_name, + fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), + // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. 
+ max_notification_size: 1024 * 1024, + handshake: None, + set_config: sc_network::config::SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + }, + } +} + +/// Run a GRANDPA voter as a task. Provide configuration and a link to a +/// block import worker that has already been instantiated with `block_import`. +pub fn run_grandpa_voter( + grandpa_params: GrandpaParams, +) -> sp_blockchain::Result + Send> +where + Block::Hash: Ord, + BE: Backend + 'static, + N: NetworkT + Sync + 'static, + S: SyncingT + Sync + 'static, + SC: SelectChain + 'static, + VR: VotingRule + Clone + 'static, + NumberFor: BlockNumberOps, + C: ClientForGrandpa + 'static, + C::Api: GrandpaApi, +{ + let GrandpaParams { + mut config, + link, + network, + sync, + voting_rule, + prometheus_registry, + shared_voter_state, + telemetry, + } = grandpa_params; + + // NOTE: we have recently removed `run_grandpa_observer` from the public + // API, I felt it is easier to just ignore this field rather than removing + // it from the config temporarily. This should be removed after #5013 is + // fixed and we re-add the observer to the public API. + config.observer_enabled = false; + + let LinkHalf { + client, + select_chain, + persistent_data, + voter_commands_rx, + justification_sender, + justification_stream: _, + telemetry: _, + } = link; + + let network = NetworkBridge::new( + network, + sync, + config.clone(), + persistent_data.set_state.clone(), + prometheus_registry.as_ref(), + telemetry.clone(), + ); + + let conf = config.clone(); + let telemetry_task = + if let Some(telemetry_on_connect) = telemetry.as_ref().map(|x| x.on_connect_stream()) { + let authorities = persistent_data.authority_set.clone(); + let telemetry = telemetry.clone(); + let events = telemetry_on_connect.for_each(move |_| { + let current_authorities = authorities.current_authorities(); + let set_id = authorities.set_id(); + let maybe_authority_id = + local_authority_id(¤t_authorities, conf.keystore.as_ref()); + + let authorities = + current_authorities.iter().map(|(id, _)| id.to_string()).collect::>(); + + let authorities = serde_json::to_string(&authorities).expect( + "authorities is always at least an empty vector; \ + elements are always of type string", + ); + + telemetry!( + telemetry; + CONSENSUS_INFO; + "afg.authority_set"; + "authority_id" => maybe_authority_id.map_or("".into(), |s| s.to_string()), + "authority_set_id" => ?set_id, + "authorities" => authorities, + ); + + future::ready(()) + }); + future::Either::Left(events) + } else { + future::Either::Right(future::pending()) + }; + + let voter_work = VoterWork::new( + client, + config, + network, + select_chain, + voting_rule, + persistent_data, + voter_commands_rx, + prometheus_registry, + shared_voter_state, + justification_sender, + telemetry, + ); + + let voter_work = voter_work.map(|res| match res { + Ok(()) => error!( + target: LOG_TARGET, + "GRANDPA voter future has concluded naturally, this should be unreachable." + ), + Err(e) => error!(target: LOG_TARGET, "GRANDPA voter error: {}", e), + }); + + // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. 
+ let telemetry_task = telemetry_task.then(|_| future::pending::<()>()); + + Ok(future::select(voter_work, telemetry_task).map(drop)) +} + +struct Metrics { + environment: environment::Metrics, + until_imported: until_imported::Metrics, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Metrics { + environment: environment::Metrics::register(registry)?, + until_imported: until_imported::Metrics::register(registry)?, + }) + } +} + +/// Future that powers the voter. +#[must_use] +struct VoterWork, S: SyncingT, SC, VR> { + voter: Pin< + Box>>> + Send>, + >, + shared_voter_state: SharedVoterState, + env: Arc>, + voter_commands_rx: TracingUnboundedReceiver>>, + network: NetworkBridge, + telemetry: Option, + /// Prometheus metrics. + metrics: Option, +} + +impl VoterWork +where + Block: BlockT, + B: Backend + 'static, + C: ClientForGrandpa + 'static, + C::Api: GrandpaApi, + N: NetworkT + Sync, + S: SyncingT + Sync, + NumberFor: BlockNumberOps, + SC: SelectChain + 'static, + VR: VotingRule + Clone + 'static, +{ + fn new( + client: Arc, + config: Config, + network: NetworkBridge, + select_chain: SC, + voting_rule: VR, + persistent_data: PersistentData, + voter_commands_rx: TracingUnboundedReceiver>>, + prometheus_registry: Option, + shared_voter_state: SharedVoterState, + justification_sender: GrandpaJustificationSender, + telemetry: Option, + ) -> Self { + let metrics = match prometheus_registry.as_ref().map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + }, + None => None, + }; + + let voters = persistent_data.authority_set.current_authorities(); + let env = Arc::new(Environment { + client, + select_chain, + voting_rule, + voters: Arc::new(voters), + config, + network: network.clone(), + set_id: persistent_data.authority_set.set_id(), + authority_set: persistent_data.authority_set.clone(), + voter_set_state: persistent_data.set_state, + metrics: metrics.as_ref().map(|m| m.environment.clone()), + justification_sender: Some(justification_sender), + telemetry: telemetry.clone(), + _phantom: PhantomData, + }); + + let mut work = VoterWork { + // `voter` is set to a temporary value and replaced below when + // calling `rebuild_voter`. + voter: Box::pin(future::pending()), + shared_voter_state, + env, + voter_commands_rx, + network, + telemetry, + metrics, + }; + work.rebuild_voter(); + work + } + + /// Rebuilds the `self.voter` field using the current authority set + /// state. This method should be called when we know that the authority set + /// has changed (e.g. as signalled by a voter command). 
+ fn rebuild_voter(&mut self) { + debug!( + target: LOG_TARGET, + "{}: Starting new voter with set ID {}", + self.env.config.name(), + self.env.set_id + ); + + let maybe_authority_id = + local_authority_id(&self.env.voters, self.env.config.keystore.as_ref()); + let authority_id = maybe_authority_id.map_or("".into(), |s| s.to_string()); + + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "afg.starting_new_voter"; + "name" => ?self.env.config.name(), + "set_id" => ?self.env.set_id, + "authority_id" => authority_id, + ); + + let chain_info = self.env.client.info(); + + let authorities = self.env.voters.iter().map(|(id, _)| id.to_string()).collect::>(); + + let authorities = serde_json::to_string(&authorities).expect( + "authorities is always at least an empty vector; elements are always of type string; qed.", + ); + + telemetry!( + self.telemetry; + CONSENSUS_INFO; + "afg.authority_set"; + "number" => ?chain_info.finalized_number, + "hash" => ?chain_info.finalized_hash, + "authority_id" => authority_id, + "authority_set_id" => ?self.env.set_id, + "authorities" => authorities, + ); + + match &*self.env.voter_set_state.read() { + VoterSetState::Live { completed_rounds, .. } => { + let last_finalized = (chain_info.finalized_hash, chain_info.finalized_number); + + let global_comms = global_communication( + self.env.set_id, + &self.env.voters, + self.env.client.clone(), + &self.env.network, + self.env.config.keystore.as_ref(), + self.metrics.as_ref().map(|m| m.until_imported.clone()), + ); + + let last_completed_round = completed_rounds.last(); + + let voter = voter::Voter::new( + self.env.clone(), + (*self.env.voters).clone(), + global_comms, + last_completed_round.number, + last_completed_round.votes.clone(), + last_completed_round.base, + last_finalized, + ); + + // Repoint shared_voter_state so that the RPC endpoint can query the state + if self.shared_voter_state.reset(voter.voter_state()).is_none() { + info!( + target: LOG_TARGET, + "Timed out trying to update shared GRANDPA voter state. \ + RPC endpoints may return stale data." + ); + } + + self.voter = Box::pin(voter); + }, + VoterSetState::Paused { .. } => self.voter = Box::pin(future::pending()), + }; + } + + fn handle_voter_command( + &mut self, + command: VoterCommand>, + ) -> Result<(), Error> { + match command { + VoterCommand::ChangeAuthorities(new) => { + let voters: Vec = + new.authorities.iter().map(move |(a, _)| format!("{}", a)).collect(); + telemetry!( + self.telemetry; + CONSENSUS_INFO; + "afg.voter_command_change_authorities"; + "number" => ?new.canon_number, + "hash" => ?new.canon_hash, + "voters" => ?voters, + "set_id" => ?new.set_id, + ); + + self.env.update_voter_set_state(|_| { + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. 
+ let set_state = VoterSetState::live( + new.set_id, + &*self.env.authority_set.inner(), + (new.canon_hash, new.canon_number), + ); + + aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; + Ok(Some(set_state)) + })?; + + let voters = Arc::new(VoterSet::new(new.authorities.into_iter()).expect( + "new authorities come from pending change; pending change comes from \ + `AuthoritySet`; `AuthoritySet` validates authorities is non-empty and \ + weights are non-zero; qed.", + )); + + self.env = Arc::new(Environment { + voters, + set_id: new.set_id, + voter_set_state: self.env.voter_set_state.clone(), + client: self.env.client.clone(), + select_chain: self.env.select_chain.clone(), + config: self.env.config.clone(), + authority_set: self.env.authority_set.clone(), + network: self.env.network.clone(), + voting_rule: self.env.voting_rule.clone(), + metrics: self.env.metrics.clone(), + justification_sender: self.env.justification_sender.clone(), + telemetry: self.telemetry.clone(), + _phantom: PhantomData, + }); + + self.rebuild_voter(); + Ok(()) + }, + VoterCommand::Pause(reason) => { + info!(target: LOG_TARGET, "Pausing old validator set: {}", reason); + + // not racing because old voter is shut down. + self.env.update_voter_set_state(|voter_set_state| { + let completed_rounds = voter_set_state.completed_rounds(); + let set_state = VoterSetState::Paused { completed_rounds }; + + aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; + Ok(Some(set_state)) + })?; + + self.rebuild_voter(); + Ok(()) + }, + } + } +} + +impl Future for VoterWork +where + Block: BlockT, + B: Backend + 'static, + N: NetworkT + Sync, + S: SyncingT + Sync, + NumberFor: BlockNumberOps, + SC: SelectChain + 'static, + C: ClientForGrandpa + 'static, + C::Api: GrandpaApi, + VR: VotingRule + Clone + 'static, +{ + type Output = Result<(), Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match Future::poll(Pin::new(&mut self.voter), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(())) => { + // voters don't conclude naturally + return Poll::Ready(Err(Error::Safety( + "consensus-grandpa inner voter has concluded.".into(), + ))) + }, + Poll::Ready(Err(CommandOrError::Error(e))) => { + // return inner observer error + return Poll::Ready(Err(e)) + }, + Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { + // some command issued internally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + }, + } + + match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { + Poll::Pending => {}, + Poll::Ready(None) => { + // the `voter_commands_rx` stream should never conclude since it's never closed. + return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))) + }, + Poll::Ready(Some(command)) => { + // some command issued externally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + }, + } + + Future::poll(Pin::new(&mut self.network), cx) + } +} + +/// Checks if this node has any available keys in the keystore for any authority id in the given +/// voter set. Returns the authority id for which keys are available, or `None` if no keys are +/// available. +fn local_authority_id( + voters: &VoterSet, + keystore: Option<&KeystorePtr>, +) -> Option { + keystore.and_then(|keystore| { + voters + .iter() + .find(|(p, _)| keystore.has_keys(&[(p.to_raw_vec(), AuthorityId::ID)])) + .map(|(p, _)| p.clone()) + }) +} + +/// Reverts protocol aux data to at most the last finalized block. 
+/// In particular, standard and forced authority set changes announced after the +/// revert point are removed. +pub fn revert(client: Arc, blocks: NumberFor) -> ClientResult<()> +where + Block: BlockT, + Client: AuxStore + HeaderMetadata + HeaderBackend, +{ + let best_number = client.info().best_number; + let finalized = client.info().finalized_number; + + let revertible = blocks.min(best_number - finalized); + if revertible == Zero::zero() { + return Ok(()) + } + + let number = best_number - revertible; + let hash = client + .block_hash_from_id(&BlockId::Number(number))? + .ok_or(ClientError::Backend(format!( + "Unexpected hash lookup failure for block number: {}", + number + )))?; + + let info = client.info(); + + let persistent_data: PersistentData = + aux_schema::load_persistent(&*client, info.genesis_hash, Zero::zero(), || { + const MSG: &str = "Unexpected missing grandpa data during revert"; + Err(ClientError::Application(Box::from(MSG))) + })?; + + let shared_authority_set = persistent_data.authority_set; + let mut authority_set = shared_authority_set.inner(); + + let is_descendent_of = is_descendent_of(&*client, None); + authority_set.revert(hash, number, &is_descendent_of); + + // The following has the side effect to properly reset the current voter state. + let (set_id, set_ref) = authority_set.current(); + let new_set = Some(NewAuthoritySet { + canon_hash: info.finalized_hash, + canon_number: info.finalized_number, + set_id, + authorities: set_ref.to_vec(), + }); + aux_schema::update_authority_set::(&authority_set, new_set.as_ref(), |values| { + client.insert_aux(values, None) + }) +} diff --git a/substrate/client/consensus/grandpa/src/notification.rs b/substrate/client/consensus/grandpa/src/notification.rs new file mode 100644 index 00000000..de1fba09 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/notification.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr}; + +use crate::justification::GrandpaJustification; + +/// The sending half of the Grandpa justification channel(s). +/// +/// Used to send notifications about justifications generated +/// at the end of a Grandpa round. +pub type GrandpaJustificationSender = NotificationSender>; + +/// The receiving half of the Grandpa justification channel. +/// +/// Used to receive notifications about justifications generated +/// at the end of a Grandpa round. +/// The `GrandpaJustificationStream` entity stores the `SharedJustificationSenders` +/// so it can be used to add more subscriptions. +pub type GrandpaJustificationStream = + NotificationStream, GrandpaJustificationsTracingKey>; + +/// Provides tracing key for GRANDPA justifications stream. 
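These aliases are thin wrappers over the generic notification channel in `sc-utils`. The sketch below shows roughly how such a channel is created and fed; it assumes the `NotificationStream::channel()` constructor and the lazily-evaluated `NotificationSender::notify` exposed by `sc_utils::notification` on this branch, so treat the exact signatures as assumptions, and the `Dummy*` names are purely illustrative:

```rust
use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr};

// Stand-in payload; the real channel carries `GrandpaJustification<Block>`.
#[derive(Clone)]
struct DummyJustification(u64);

#[derive(Clone)]
struct DummyTracingKey;
impl TracingKeyStr for DummyTracingKey {
    const TRACING_KEY: &'static str = "mpsc_dummy_justification_notification_stream";
}

type DummySender = NotificationSender<DummyJustification>;
type DummyStream = NotificationStream<DummyJustification, DummyTracingKey>;

fn wire_up() -> (DummySender, DummyStream) {
    // The sender half lives with the producer (the voter environment for GRANDPA),
    // while the stream half is handed to subscribers such as the RPC layer.
    let (sender, stream) = DummyStream::channel();

    // Payload construction is lazy: the closure only runs if somebody subscribed.
    let _ = sender.notify(|| Ok::<_, ()>(DummyJustification(42)));

    (sender, stream)
}
```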
+#[derive(Clone)] +pub struct GrandpaJustificationsTracingKey; +impl TracingKeyStr for GrandpaJustificationsTracingKey { + const TRACING_KEY: &'static str = "mpsc_grandpa_justification_notification_stream"; +} diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs new file mode 100644 index 00000000..8541baa8 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/observer.rs @@ -0,0 +1,475 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::{ + marker::{PhantomData, Unpin}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError}; +use futures::prelude::*; +use log::{debug, info, warn}; + +use sc_client_api::backend::Backend; +use sc_telemetry::TelemetryHandle; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain; +use sp_consensus_grandpa::AuthorityId; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +use crate::{ + authorities::SharedAuthoritySet, + aux_schema::PersistentData, + communication::{Network as NetworkT, NetworkBridge, Syncing as SyncingT}, + environment, global_communication, + notification::GrandpaJustificationSender, + ClientForGrandpa, CommandOrError, CommunicationIn, Config, Error, LinkHalf, VoterCommand, + VoterSetState, LOG_TARGET, +}; + +struct ObserverChain<'a, Block: BlockT, Client> { + client: &'a Arc, + _phantom: PhantomData, +} + +impl<'a, Block, Client> finality_grandpa::Chain> + for ObserverChain<'a, Block, Client> +where + Block: BlockT, + Client: HeaderMetadata, + NumberFor: BlockNumberOps, +{ + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + environment::ancestry(self.client, base, block) + } +} + +fn grandpa_observer( + client: &Arc, + authority_set: &SharedAuthoritySet>, + voters: &Arc>, + justification_sender: &Option>, + last_finalized_number: NumberFor, + commits: S, + note_round: F, + telemetry: Option, +) -> impl Future>>> +where + NumberFor: BlockNumberOps, + S: Stream, CommandOrError>>>, + F: Fn(u64), + BE: Backend, + Client: ClientForGrandpa, +{ + let authority_set = authority_set.clone(); + let client = client.clone(); + let voters = voters.clone(); + let justification_sender = justification_sender.clone(); + + let observer = commits.try_fold(last_finalized_number, move |last_finalized_number, global| { + let (round, commit, callback) = match global { + voter::CommunicationIn::Commit(round, commit, callback) => { + let commit = finality_grandpa::Commit::from(commit); + (round, commit, callback) + }, + voter::CommunicationIn::CatchUp(..) 
=> { + // ignore catch up messages + return future::ok(last_finalized_number) + }, + }; + + // if the commit we've received targets a block lower or equal to the last + // finalized, ignore it and continue with the current state + if commit.target_number <= last_finalized_number { + return future::ok(last_finalized_number) + } + + let validation_result = match finality_grandpa::validate_commit( + &commit, + &voters, + &ObserverChain { client: &client, _phantom: PhantomData }, + ) { + Ok(r) => r, + Err(e) => return future::err(e.into()), + }; + + if validation_result.is_valid() { + let finalized_hash = commit.target_hash; + let finalized_number = commit.target_number; + + // commit is valid, finalize the block it targets + match environment::finalize_block( + client.clone(), + &authority_set, + None, + finalized_hash, + finalized_number, + (round, commit).into(), + false, + justification_sender.as_ref(), + telemetry.clone(), + ) { + Ok(_) => {}, + Err(e) => return future::err(e), + }; + + // note that we've observed completion of this round through the commit, + // and that implies that the next round has started. + note_round(round + 1); + + finality_grandpa::process_commit_validation_result(validation_result, callback); + + // proceed processing with new finalized block number + future::ok(finalized_number) + } else { + debug!(target: LOG_TARGET, "Received invalid commit: ({:?}, {:?})", round, commit); + + finality_grandpa::process_commit_validation_result(validation_result, callback); + + // commit is invalid, continue processing commits with the current state + future::ok(last_finalized_number) + } + }); + + observer.map_ok(|_| ()) +} + +/// Run a GRANDPA observer as a task, the observer will finalize blocks only by +/// listening for and validating GRANDPA commits instead of following the full +/// protocol. Provide configuration and a link to a block import worker that has +/// already been instantiated with `block_import`. +/// NOTE: this is currently not part of the crate's public API since we don't consider +/// it stable enough to use on a live network. +pub fn run_grandpa_observer( + config: Config, + link: LinkHalf, + network: N, + sync: S, +) -> sp_blockchain::Result + Send> +where + BE: Backend + Unpin + 'static, + N: NetworkT, + S: SyncingT, + SC: SelectChain, + NumberFor: BlockNumberOps, + Client: ClientForGrandpa + 'static, +{ + let LinkHalf { + client, + persistent_data, + voter_commands_rx, + justification_sender, + telemetry, + .. + } = link; + + let network = NetworkBridge::new( + network, + sync, + config.clone(), + persistent_data.set_state.clone(), + None, + telemetry.clone(), + ); + + let observer_work = ObserverWork::new( + client, + network, + persistent_data, + config.keystore, + voter_commands_rx, + Some(justification_sender), + telemetry, + ); + + let observer_work = observer_work.map_ok(|_| ()).map_err(|e| { + warn!("GRANDPA Observer failed: {}", e); + }); + + Ok(observer_work.map(drop)) +} + +/// Future that powers the observer. 
+#[must_use] +struct ObserverWork, S: SyncingT> { + observer: + Pin>>> + Send>>, + client: Arc, + network: NetworkBridge, + persistent_data: PersistentData, + keystore: Option, + voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: Option>, + telemetry: Option, + _phantom: PhantomData, +} + +impl ObserverWork +where + B: BlockT, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, + Network: NetworkT, + Syncing: SyncingT, + NumberFor: BlockNumberOps, +{ + fn new( + client: Arc, + network: NetworkBridge, + persistent_data: PersistentData, + keystore: Option, + voter_commands_rx: TracingUnboundedReceiver>>, + justification_sender: Option>, + telemetry: Option, + ) -> Self { + let mut work = ObserverWork { + // `observer` is set to a temporary value and replaced below when + // calling `rebuild_observer`. + observer: Box::pin(future::pending()) as Pin>, + client, + network, + persistent_data, + keystore: keystore.clone(), + voter_commands_rx, + justification_sender, + telemetry, + _phantom: PhantomData, + }; + work.rebuild_observer(); + work + } + + /// Rebuilds the `self.observer` field using the current authority set + /// state. This method should be called when we know that the authority set + /// has changed (e.g. as signalled by a voter command). + fn rebuild_observer(&mut self) { + let set_id = self.persistent_data.authority_set.set_id(); + let voters = Arc::new(self.persistent_data.authority_set.current_authorities()); + + // start global communication stream for the current set + let (global_in, _) = global_communication( + set_id, + &voters, + self.client.clone(), + &self.network, + self.keystore.as_ref(), + None, + ); + + let last_finalized_number = self.client.info().finalized_number; + + // NOTE: since we are not using `round_communication` we have to + // manually note the round with the gossip validator, otherwise we won't + // relay round messages. we want all full nodes to contribute to vote + // availability. + let note_round = { + let network = self.network.clone(); + let voters = voters.clone(); + + move |round| { + network.note_round( + crate::communication::Round(round), + crate::communication::SetId(set_id), + &voters, + ) + } + }; + + // create observer for the current set + let observer = grandpa_observer( + &self.client, + &self.persistent_data.authority_set, + &voters, + &self.justification_sender, + last_finalized_number, + global_in, + note_round, + self.telemetry.clone(), + ); + + self.observer = Box::pin(observer); + } + + fn handle_voter_command( + &mut self, + command: VoterCommand>, + ) -> Result<(), Error> { + // the observer doesn't use the voter set state, but we need to + // update it on-disk in case we restart as validator in the future. + self.persistent_data.set_state = match command { + VoterCommand::Pause(reason) => { + info!(target: LOG_TARGET, "Pausing old validator set: {}", reason); + + let completed_rounds = self.persistent_data.set_state.read().completed_rounds(); + let set_state = VoterSetState::Paused { completed_rounds }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + set_state + }, + VoterCommand::ChangeAuthorities(new) => { + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. 
+ let set_state = VoterSetState::live( + new.set_id, + &*self.persistent_data.authority_set.inner(), + (new.canon_hash, new.canon_number), + ); + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + set_state + }, + } + .into(); + + self.rebuild_observer(); + Ok(()) + } +} + +impl Future for ObserverWork +where + B: BlockT, + BE: Backend + Unpin + 'static, + C: ClientForGrandpa + 'static, + N: NetworkT, + S: SyncingT, + NumberFor: BlockNumberOps, +{ + type Output = Result<(), Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match Future::poll(Pin::new(&mut self.observer), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(())) => { + // observer commit stream doesn't conclude naturally; this could reasonably be an + // error. + return Poll::Ready(Ok(())) + }, + Poll::Ready(Err(CommandOrError::Error(e))) => { + // return inner observer error + return Poll::Ready(Err(e)) + }, + Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { + // some command issued internally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + }, + } + + match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { + Poll::Pending => {}, + Poll::Ready(None) => { + // the `voter_commands_rx` stream should never conclude since it's never closed. + return Poll::Ready(Ok(())) + }, + Poll::Ready(Some(command)) => { + // some command issued externally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + }, + } + + Future::poll(Pin::new(&mut self.network), cx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use crate::{ + aux_schema, + communication::tests::{make_test_network, Event}, + }; + use assert_matches::assert_matches; + use sc_network::PeerId; + use sc_utils::mpsc::tracing_unbounded; + use sp_blockchain::HeaderBackend as _; + use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; + + use futures::executor; + + /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression + /// test for bug introduced in d4fbb897c and fixed in b7af8b339. + /// + /// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator` + /// to the underlying `dyn Network`. This test triggers a reputation change by calling + /// `GossipValidator::validate` with an invalid gossip message. After polling the `ObserverWork` + /// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test + /// network. + #[test] + fn observer_work_polls_underlying_network_bridge() { + // Create a test network. + let (tester_fut, _network) = make_test_network(); + let mut tester = executor::block_on(tester_fut); + + // Create an observer. + let (client, backend) = { + let builder = TestClientBuilder::with_default_backend(); + let backend = builder.backend(); + let (client, _) = builder.build_with_longest_chain(); + (Arc::new(client), backend) + }; + + let voters = vec![(sp_keyring::Ed25519Keyring::Alice.public().into(), 1)]; + + let persistent_data = + aux_schema::load_persistent(&*backend, client.info().genesis_hash, 0, || Ok(voters)) + .unwrap(); + + let (_tx, voter_command_rx) = tracing_unbounded("test_mpsc_voter_command", 100_000); + + let observer = ObserverWork::new( + client, + tester.net_handle.clone(), + persistent_data, + None, + voter_command_rx, + None, + None, + ); + + // Trigger a reputation change through the gossip validator. 
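The regression covered by this test boils down to a general rule for wrapper futures like `ObserverWork` and `VoterWork`: a future that owns a background driver (the `NetworkBridge` here) must re-poll that driver every time it is itself polled, or the driver silently stalls. A self-contained sketch of that shape using only the standard library; the `Wrapper` type is illustrative and not part of the patch:

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

/// Wraps some main work plus a background driver that must be kept polled
/// for the main work to make progress.
struct Wrapper<W, D> {
    work: W,
    driver: D,
}

impl<W, D> Future for Wrapper<W, D>
where
    W: Future<Output = ()> + Unpin,
    D: Future<Output = ()> + Unpin,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        // Finish when the main work finishes ...
        if Pin::new(&mut this.work).poll(cx).is_ready() {
            return Poll::Ready(())
        }

        // ... but always give the driver a turn, even while the main work is
        // pending. Dropping this line is exactly the bug the test guards against.
        let _ = Pin::new(&mut this.driver).poll(cx);
        Poll::Pending
    }
}
```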
+ let peer_id = PeerId::random(); + tester.trigger_gossip_validator_reputation_change(&peer_id); + + executor::block_on(async move { + // Poll the observer once and have it forward the reputation change from the gossip + // validator to the test network. + assert!(observer.now_or_never().is_none()); + + // Ignore initial event stream request by gossip engine. + match tester.events.next().now_or_never() { + Some(Some(Event::EventStream(_))) => {}, + _ => panic!("expected event stream request"), + }; + + assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _)))); + }); + } +} diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs new file mode 100644 index 00000000..726ae89e --- /dev/null +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -0,0 +1,2145 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tests and test helpers for GRANDPA. + +use super::*; +use assert_matches::assert_matches; +use async_trait::async_trait; +use environment::HasVoted; +use futures_timer::Delay; +use parking_lot::{Mutex, RwLock}; +use sc_consensus::{ + BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, + ImportedAux, +}; +use sc_network::config::Role; +use sc_network_test::{ + Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, + PeersFullClient, TestClient, TestNetFactory, +}; +use sp_api::{ApiRef, ProvideRuntimeApi}; +use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; +use sp_consensus_grandpa::{ + AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, +}; +use sp_core::H256; +use sp_keyring::Ed25519Keyring; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; +use sp_runtime::{ + codec::Encode, + generic::{BlockId, DigestItem}, + traits::{Block as BlockT, Header as HeaderT}, + Justifications, +}; +use std::{collections::HashSet, pin::Pin}; +use substrate_test_runtime_client::{runtime::BlockNumber, BlockBuilderExt}; +use tokio::runtime::Handle; + +use authorities::AuthoritySet; +use communication::grandpa_protocol_name; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_consensus::LongestChain; +use sp_application_crypto::key_types::GRANDPA; + +type TestLinkHalf = + LinkHalf>; +type PeerData = Mutex>; +type GrandpaPeer = Peer; +type GrandpaBlockImport = crate::GrandpaBlockImport< + substrate_test_runtime_client::Backend, + Block, + PeersFullClient, + LongestChain, +>; + +const JUSTIFICATION_IMPORT_PERIOD: u32 = 32; + +#[derive(Default)] +struct GrandpaTestNet { + peers: Vec, + test_config: TestApi, +} + +impl GrandpaTestNet { + fn new(test_config: TestApi, n_authority: usize, n_full: usize) -> Self { + let mut net = + GrandpaTestNet 
{ peers: Vec::with_capacity(n_authority + n_full), test_config }; + + for _ in 0..n_authority { + net.add_authority_peer(); + } + + for _ in 0..n_full { + net.add_full_peer(); + } + + net + } +} + +impl GrandpaTestNet { + fn add_authority_peer(&mut self) { + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![grandpa_protocol_name::NAME.into()], + is_authority: true, + ..Default::default() + }) + } +} + +impl TestNetFactory for GrandpaTestNet { + type Verifier = PassThroughVerifier; + type PeerData = PeerData; + type BlockImport = GrandpaBlockImport; + + fn add_full_peer(&mut self) { + self.add_full_peer_with_config(FullPeerConfig { + notifications_protocols: vec![grandpa_protocol_name::NAME.into()], + is_authority: false, + ..Default::default() + }) + } + + fn make_verifier(&self, _client: PeersClient, _: &PeerData) -> Self::Verifier { + PassThroughVerifier::new(false) // use non-instant finality. + } + + fn make_block_import( + &self, + client: PeersClient, + ) -> (BlockImportAdapter, Option>, PeerData) { + let (client, backend) = (client.as_client(), client.as_backend()); + let (import, link) = block_import( + client.clone(), + JUSTIFICATION_IMPORT_PERIOD, + &self.test_config, + LongestChain::new(backend.clone()), + None, + ) + .expect("Could not create block import for fresh peer."); + let justification_import = Box::new(import.clone()); + (BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link))) + } + + fn peer(&mut self, i: usize) -> &mut GrandpaPeer { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec { + &self.peers + } + + fn peers_mut(&mut self) -> &mut Vec { + &mut self.peers + } + + fn mut_peers)>(&mut self, closure: F) { + closure(&mut self.peers); + } +} + +#[derive(Default, Clone)] +pub(crate) struct TestApi { + genesis_authorities: AuthorityList, +} + +impl TestApi { + pub fn new(genesis_authorities: AuthorityList) -> Self { + TestApi { genesis_authorities } + } +} + +pub(crate) struct RuntimeApi { + inner: TestApi, +} + +impl ProvideRuntimeApi for TestApi { + type Api = RuntimeApi; + + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { + RuntimeApi { inner: self.clone() }.into() + } +} + +sp_api::mock_impl_runtime_apis! { + impl GrandpaApi for RuntimeApi { + fn grandpa_authorities(&self) -> AuthorityList { + self.inner.genesis_authorities.clone() + } + + fn current_set_id(&self) -> SetId { + 0 + } + + fn submit_report_equivocation_unsigned_extrinsic( + _equivocation_proof: EquivocationProof, + _key_owner_proof: OpaqueKeyOwnershipProof, + ) -> Option<()> { + None + } + + fn generate_key_ownership_proof( + _set_id: SetId, + _authority_id: AuthorityId, + ) -> Option { + None + } + } +} + +impl GenesisAuthoritySetProvider for TestApi { + fn get(&self) -> sp_blockchain::Result { + Ok(self.genesis_authorities.clone()) + } +} + +/// A mock `SelectChain` that allows the user to set the return values for each +/// method. After the `SelectChain` methods are called the pending value is +/// discarded and another call to set new values must be performed. 
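Because each pending value is `take()`n on use, the mock has one-shot semantics: every expectation must be re-armed before the next call, otherwise the internal `unwrap()` panics. A short usage sketch of the `MockSelectChain` defined just below; the headers and hashes are assumed to come from the surrounding test chain:

```rust
let select_chain = MockSelectChain::default();

// Arm the expectations ...
select_chain.set_best_chain(header_a.clone());
select_chain.set_finality_target(hash_a);

// ... which are consumed by the first call to each method.
let best = select_chain.best_chain().await.unwrap();
let target = select_chain.finality_target(hash_a, None).await.unwrap();
assert_eq!(best, header_a);
assert_eq!(target, hash_a);

// They must be set again before the next use.
select_chain.set_best_chain(header_b.clone());
select_chain.set_finality_target(hash_b);
```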
+#[derive(Clone, Default)] +struct MockSelectChain { + leaves: Arc>>>, + best_chain: Arc::Header>>>, + finality_target: Arc>>, +} + +impl MockSelectChain { + fn set_best_chain(&self, best: ::Header) { + *self.best_chain.lock() = Some(best); + } + + fn set_finality_target(&self, target: Hash) { + *self.finality_target.lock() = Some(target); + } +} + +#[async_trait] +impl SelectChain for MockSelectChain { + async fn leaves(&self) -> Result, ConsensusError> { + Ok(self.leaves.lock().take().unwrap()) + } + + async fn best_chain(&self) -> Result<::Header, ConsensusError> { + Ok(self.best_chain.lock().take().unwrap()) + } + + async fn finality_target( + &self, + _base_hash: Hash, + _maybe_max_number: Option>, + ) -> Result { + Ok(self.finality_target.lock().take().unwrap()) + } +} + +// A mock voting rule that allows asserting an expected value for best block +#[derive(Clone, Default)] +struct AssertBestBlock(Arc>>); + +impl VotingRule for AssertBestBlock +where + B: HeaderBackend, +{ + fn restrict_vote( + &self, + _backend: Arc, + _base: &::Header, + best_target: &::Header, + _current_target: &::Header, + ) -> VotingRuleResult { + if let Some(expected) = *self.0.lock() { + assert_eq!(best_target.hash(), expected); + } + + Box::pin(std::future::ready(None)) + } +} + +impl AssertBestBlock { + fn set_expected_best_block(&self, hash: Hash) { + *self.0.lock() = Some(hash); + } +} + +const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); + +fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() +} + +fn create_keystore(authority: Ed25519Keyring) -> KeystorePtr { + let keystore = MemoryKeystore::new(); + keystore + .ed25519_generate_new(GRANDPA, Some(&authority.to_seed())) + .expect("Creates authority key"); + keystore.into() +} + +async fn run_until_complete(future: impl Future + Unpin, net: &Arc>) { + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + future::select(future, drive_to_completion).await; +} + +// Spawns grandpa voters. Returns a future to spawn on the runtime. +fn initialize_grandpa( + net: &mut GrandpaTestNet, + peers: &[Ed25519Keyring], +) -> impl Future { + let voters = stream::FuturesUnordered::new(); + + for (peer_id, key) in peers.iter().enumerate() { + let keystore = create_keystore(*key); + + let (net_service, link) = { + // temporary needed for some reason + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].network_service().clone(), link) + }; + let sync = net.peers[peer_id].sync_service().clone(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net_service, + sync, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + let voter = + run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + + fn assert_send(_: &T) {} + assert_send(&voter); + + voters.push(voter); + } + + voters.for_each(|_| async move {}) +} + +// run the voters to completion. provide a closure to be invoked after +// the voters are spawned but before blocking on them. 
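The closure parameter referred to above lets individual tests attach an extra future that is awaited alongside the per-peer finality streams; `run_to_completion` simply passes `|_| None`. A hedged usage sketch is below; the boxed-future type mirrors the helper's signature, and the body of the extra future is illustrative:

```rust
let finalized = run_to_completion_with(20, net.clone(), peers, |_handle| {
    // Anything returned here is awaited together with the finality watchers.
    let extra: Pin<Box<dyn Future<Output = ()> + Send>> = Box::pin(async move {
        // e.g. drive an observer or assert on a justification stream here
    });
    Some(extra)
})
.await;

assert!(finalized >= 20);
```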
+async fn run_to_completion_with( + blocks: u64, + net: Arc>, + peers: &[Ed25519Keyring], + with: F, +) -> u64 +where + F: FnOnce(Handle) -> Option>>>, +{ + let mut wait_for = Vec::new(); + + let highest_finalized = Arc::new(RwLock::new(0)); + + if let Some(f) = (with)(Handle::current()) { + wait_for.push(f); + }; + + for (peer_id, _) in peers.iter().enumerate() { + let highest_finalized = highest_finalized.clone(); + let client = net.lock().peers[peer_id].client().clone(); + + wait_for.push(Box::pin( + client + .finality_notification_stream() + .take_while(move |n| { + let mut highest_finalized = highest_finalized.write(); + if *n.header.number() > *highest_finalized { + *highest_finalized = *n.header.number(); + } + future::ready(n.header.number() < &blocks) + }) + .collect::>() + .map(|_| ()), + )); + } + + // wait for all finalized on each. + let wait_for = ::futures::future::join_all(wait_for); + + run_until_complete(wait_for, &net).await; + let highest_finalized = *highest_finalized.read(); + highest_finalized +} + +async fn run_to_completion( + blocks: u64, + net: Arc>, + peers: &[Ed25519Keyring], +) -> u64 { + run_to_completion_with(blocks, net, peers, |_| None).await +} + +fn add_scheduled_change(builder: &mut impl BlockBuilderExt, change: ScheduledChange) { + builder + .push_deposit_log_digest_item(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + sp_consensus_grandpa::ConsensusLog::ScheduledChange(change).encode(), + )) + .unwrap(); +} + +fn add_forced_change( + builder: &mut impl BlockBuilderExt, + median_last_finalized: BlockNumber, + change: ScheduledChange, +) { + builder + .push_deposit_log_digest_item(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + sp_consensus_grandpa::ConsensusLog::ForcedChange(median_last_finalized, change) + .encode(), + )) + .unwrap(); +} + +#[tokio::test] +async fn finalize_3_voters_no_observers() { + sp_tracing::try_init_simple(); + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); + tokio::spawn(initialize_grandpa(&mut net, peers)); + net.peer(0).push_blocks(20, false); + net.run_until_sync().await; + let hashof20 = net.peer(0).client().info().best_hash; + + for i in 0..3 { + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_hash, hashof20, "Peer #{} failed to sync", i); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(20, net.clone(), peers).await; + + // all peers should have stored the justification for the best finalized block #20 + for peer_id in 0..3 { + let client = net.lock().peers[peer_id].client().as_client(); + let justification = + crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); + + assert_eq!(justification.justification.commit.target_number, 20); + } +} + +#[tokio::test] +async fn finalize_3_voters_1_full_observer() { + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); + tokio::spawn(initialize_grandpa(&mut net, peers)); + + tokio::spawn({ + let peer_id = 3; + let net_service = net.peers[peer_id].network_service().clone(); + let sync = net.peers[peer_id].sync_service().clone(); + let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: 
TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: None, + name: Some(format!("peer#{}", peer_id)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net_service, + sync, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); + + net.peer(0).push_blocks(20, false); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().clone(); + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .for_each(move |_| future::ready(())), + ); + } + + // wait for all finalized on each. + let wait_for = futures::future::join_all(finality_notifications).map(|_| ()); + + run_until_complete(wait_for, &net).await; + + // all peers should have stored the justification for the best finalized block #20 + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().as_client(); + let justification = + crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); + + assert_eq!(justification.justification.commit.target_number, 20); + } +} + +#[tokio::test] +async fn transition_3_voters_twice_1_full_observer() { + sp_tracing::try_init_simple(); + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + + let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; + + let peers_c = &[Ed25519Keyring::Alice, Ed25519Keyring::Eve, Ed25519Keyring::Two]; + + let observer = &[Ed25519Keyring::One]; + + let all_peers = peers_a + .iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>(); // deduplicate + + let genesis_voters = make_ids(peers_a); + + let api = TestApi::new(genesis_voters); + let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8, 1))); + + let mut voters = Vec::new(); + for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { + let keystore = create_keystore(local_key); + + let (net_service, link, sync) = { + let net = net.lock(); + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + ( + net.peers[peer_id].network_service().clone(), + link, + net.peers[peer_id].sync_service().clone(), + ) + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net_service, + sync, + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + voters + .push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + } + + net.lock().peer(0).push_blocks(1, false); + net.lock().run_until_sync().await; + + for (i, peer) in net.lock().peers().iter().enumerate() { + let full_client = peer.client().as_client(); + assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); + + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + 
assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + + { + let net = net.clone(); + let client = net.lock().peers[0].client().clone(); + let peers_c = *peers_c; + + // wait for blocks to be finalized before generating new ones + let block_production = client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &30)) + .for_each(move |n| { + match n.header.number() { + 1 => { + // first 14 blocks. + net.lock().peer(0).push_blocks(13, false); + }, + 14 => { + // generate transition at block 15, applied at 20. + net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |mut builder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 4 }, + ); + builder.build().unwrap().block + }); + net.lock().peer(0).push_blocks(5, false); + }, + 20 => { + // at block 21 we do another transition, but this time instant. + // add more until we have 30. + net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |mut builder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(&peers_c), delay: 0 }, + ); + builder.build().unwrap().block + }); + net.lock().peer(0).push_blocks(9, false); + }, + _ => {}, + } + + future::ready(()) + }); + + tokio::spawn(block_production); + } + + let mut finality_notifications = Vec::new(); + + for voter in voters { + tokio::spawn(voter); + } + + for (peer_id, _) in all_peers.into_iter().enumerate() { + let client = net.lock().peers[peer_id].client().clone(); + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &30)) + .for_each(move |_| future::ready(())) + .map(move |()| { + let full_client = client.as_client(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + }), + ); + } + + // wait for all finalized on each. 
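The "wait for all finalized" step that follows uses the same shape as the other tests in this file: join one finality-watching future per peer, then race the joined future against a never-ending poll loop that keeps the test network running. A self-contained sketch of that shape with the `futures` crate, where the `drive` future stands in for `net.lock().poll(cx)`:

```rust
use futures::future::{self, join_all};
use std::task::Poll;

async fn wait_for_all(watchers: Vec<impl std::future::Future<Output = ()> + Unpin>) {
    // Every watcher must complete (one per peer in the real tests).
    let all_done = join_all(watchers);

    // Keeps background machinery polled forever; never completes on its own.
    let drive = future::poll_fn(|_cx| Poll::<()>::Pending);

    // Resolves as soon as all watchers are done.
    future::select(all_done, drive).await;
}
```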
+ let wait_for = ::futures::future::join_all(finality_notifications); + + run_until_complete(wait_for, &net).await; +} + +#[tokio::test] +async fn justification_is_generated_periodically() { + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 0); + tokio::spawn(initialize_grandpa(&mut net, peers)); + net.peer(0).push_blocks(32, false); + net.run_until_sync().await; + + let hashof32 = net.peer(0).client().info().best_hash; + + let net = Arc::new(Mutex::new(net)); + run_to_completion(32, net.clone(), peers).await; + + // when block#32 (justification_generation_period) is finalized, + // justification is required => generated + for i in 0..3 { + assert!(net.lock().peer(i).client().justifications(hashof32).unwrap().is_some()); + } +} + +#[tokio::test] +async fn sync_justifications_on_change_blocks() { + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_b); + + // 4 peers, 3 of them are authorities and participate in grandpa + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api, 3, 1); + let voters = initialize_grandpa(&mut net, peers_a); + + // add 20 blocks + net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + let hashof21 = net + .peer(0) + .generate_blocks(1, BlockOrigin::File, |mut builder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + builder.build().unwrap().block + }) + .pop() + .unwrap(); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + net.run_until_sync().await; + + for i in 0..4 { + assert_eq!(net.peer(i).client().info().best_number, 25, "Peer #{} failed to sync", i); + } + + let net = Arc::new(Mutex::new(net)); + tokio::spawn(voters); + run_to_completion(25, net.clone(), peers_a).await; + + // the first 3 peers are grandpa voters and therefore have already finalized + // block 21 and stored a justification + for i in 0..3 { + assert!(net.lock().peer(i).client().justifications(hashof21).unwrap().is_some()); + } + + // the last peer should get the justification by syncing from other peers + futures::future::poll_fn(move |cx| { + if net.lock().peer(3).client().justifications(hashof21).unwrap().is_none() { + net.lock().poll(cx); + Poll::Pending + } else { + Poll::Ready(()) + } + }) + .await; +} + +#[tokio::test] +async fn finalizes_multiple_pending_changes_in_order() { + sp_tracing::try_init_simple(); + + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; + let peers_c = &[Ed25519Keyring::Dave, Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + + let all_peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, + ]; + let genesis_voters = make_ids(peers_a); + + // 6 peers, 3 of them are authorities and participate in grandpa from genesis + // but all of them will be part of the voter set eventually so they should be + // all added to the network as authorities + let api = TestApi::new(genesis_voters); + let mut net = GrandpaTestNet::new(api, 6, 0); + tokio::spawn(initialize_grandpa(&mut net, all_peers)); + + // add 20 blocks + 
net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + net.peer(0).generate_blocks(1, BlockOrigin::File, |mut builder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + builder.build().unwrap().block + }); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + + // at block 26 we add another which is enacted at block 30 + net.peer(0).generate_blocks(1, BlockOrigin::File, |mut builder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_c), delay: 4 }, + ); + builder.build().unwrap().block + }); + + // add more blocks on top of it (until we have 30) + net.peer(0).push_blocks(4, false); + + net.run_until_sync().await; + + // all peers imported both change blocks + for i in 0..6 { + assert_eq!(net.peer(i).client().info().best_number, 30, "Peer #{} failed to sync", i); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(30, net.clone(), all_peers).await; +} + +#[tokio::test] +async fn force_change_to_new_set() { + sp_tracing::try_init_simple(); + // two of these guys are offline. + let genesis_authorities = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::One, + Ed25519Keyring::Two, + ]; + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let api = TestApi::new(make_ids(genesis_authorities)); + + let voters = make_ids(peers_a); + let mut net = GrandpaTestNet::new(api, 3, 0); + let voters_future = initialize_grandpa(&mut net, peers_a); + let net = Arc::new(Mutex::new(net)); + + net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |mut builder| { + // add a forced transition at block 12. + add_forced_change( + &mut builder, + 0, + ScheduledChange { next_authorities: voters.clone(), delay: 10 }, + ); + + // add a normal transition too to ensure that forced changes take priority. + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(genesis_authorities), delay: 5 }, + ); + + builder.build().unwrap().block + }); + + net.lock().peer(0).push_blocks(25, false); + net.lock().run_until_sync().await; + + for (i, peer) in net.lock().peers().iter().enumerate() { + assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); + + let full_client = peer.client().as_client(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + assert_eq!(set.current(), (1, voters.as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + + // it will only finalize if the forced transition happens. + // we add_blocks after the voters are spawned because otherwise + // the link-halves have the wrong AuthoritySet + tokio::spawn(voters_future); + run_to_completion(25, net, peers_a).await; +} + +#[tokio::test] +async fn allows_reimporting_change_blocks() { + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); + + let full_client = client.as_client(); + let mut builder = full_client + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + .unwrap(); + + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + let block = builder.build().unwrap().block; + + let block = || { + let block = block.clone(); + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + import + }; + + assert_eq!( + block_import.import_block(block()).await.unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: false, + is_new_best: true, + header_only: false, + }), + ); + + assert_eq!(block_import.import_block(block()).await.unwrap(), ImportResult::AlreadyInChain); +} + +#[tokio::test] +async fn test_bad_justification() { + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 3, 0); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) = net.make_block_import(client.clone()); + + let full_client = client.as_client(); + let mut builder = full_client + .new_block_at(full_client.chain_info().genesis_hash, Default::default(), false) + .unwrap(); + + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); + + let block = builder.build().unwrap().block; + + let block = || { + let block = block.clone(); + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justifications = Some(Justifications::from((GRANDPA_ENGINE_ID, Vec::new()))); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + import + }; + + assert_eq!( + block_import.import_block(block()).await.unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true, + is_new_best: true, + ..Default::default() + }), + ); + + assert_eq!(block_import.import_block(block()).await.unwrap(), ImportResult::AlreadyInChain); +} + +#[tokio::test] +async fn voter_persists_its_votes() { + use futures::future; + use std::sync::atomic::{AtomicUsize, Ordering}; + + sp_tracing::try_init_simple(); + + // we have two authorities but we'll only be running the voter for alice + // we are going to be listening for the prevotes it casts + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers); + + // alice has a chain with 20 blocks + let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2, 0); + + // create the communication layer for bob, but don't start any + // voter. 
instead we'll listen for the prevote that alice casts + // and cast our own manually + let bob_keystore = create_keystore(peers[1]); + let bob_network = { + let config = Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: Some(bob_keystore.clone()), + name: Some(format!("peer#{}", 1)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }; + + let set_state = { + let bob_client = net.peer(1).client().clone(); + let (_, _, link) = net.make_block_import(bob_client); + let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); + let PersistentData { set_state, .. } = persistent_data; + set_state + }; + + communication::NetworkBridge::new( + net.peers[1].network_service().clone(), + net.peers[1].sync_service().clone(), + config.clone(), + set_state, + None, + None, + ) + }; + + // spawn two voters for alice. + // half-way through the test, we stop one and start the other. + let (alice_voter1, abort) = future::abortable({ + let keystore = create_keystore(peers[0]); + + let (net_service, link) = { + // temporary needed for some reason + let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[0].network_service().clone(), link) + }; + let sync = net.peers[0].sync_service().clone(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net_service, + sync, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + run_grandpa_voter(grandpa_params).expect("all in order with client and network") + }); + + fn alice_voter2( + peers: &[Ed25519Keyring], + net: Arc>, + ) -> impl Future + Send { + let keystore = create_keystore(peers[0]); + let mut net = net.lock(); + + // we add a new peer to the test network and we'll use + // the network service of this new peer + net.add_authority_peer(); + let net_service = net.peers[2].network_service().clone(); + let sync = net.peers[2].sync_service().clone(); + // but we'll reuse the client from the first peer (alice_voter1) + // since we want to share the same database, so that we can + // read the persisted state after aborting alice_voter1. 
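Reusing Alice's client works because the voter's progress is persisted in the client's auxiliary storage, not held in memory. A rough sketch of that round-trip using the `aux_schema` helpers that appear elsewhere in this patch; the surrounding variables (`client`, `set_state`, `genesis_hash`, `genesis_authorities`) are assumed from context:

```rust
// The voter persists its set state as rounds progress.
aux_schema::write_voter_set_state(&*client, &set_state)?;

// A voter later built on the same database recovers it; the closure supplying
// the genesis authorities is only consulted when nothing was persisted yet.
let persistent_data: PersistentData<Block> =
    aux_schema::load_persistent(&*client, genesis_hash, 0, || Ok(genesis_authorities.clone()))?;

let _recovered_state = persistent_data.set_state;
```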
+ let alice_client = net.peer(0).client().clone(); + + let (_block_import, _, link) = net.make_block_import(alice_client); + let link = link.lock().take().unwrap(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 0)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net_service, + sync, + voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + run_grandpa_voter(grandpa_params) + .expect("all in order with client and network") + .map(move |r| { + // we need to keep the block_import alive since it owns the + // sender for the voter commands channel, if that gets dropped + // then the voter will stop + drop(_block_import); + r + }) + } + + tokio::spawn(alice_voter1); + + net.peer(0).push_blocks(20, false); + net.run_until_sync().await; + + assert_eq!(net.peer(0).client().info().best_number, 20, "Peer #{} failed to sync", 0); + + let net = Arc::new(Mutex::new(net)); + + let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + + { + let (round_rx, round_tx) = bob_network.round_communication( + Some((peers[1].public().into(), bob_keystore).into()), + communication::Round(1), + communication::SetId(0), + Arc::new(VoterSet::new(voters).unwrap()), + HasVoted::No, + ); + + tokio::spawn(bob_network); + + let round_tx = Arc::new(Mutex::new(round_tx)); + let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); + + let net = net.clone(); + let state = Arc::new(AtomicUsize::new(0)); + + tokio::spawn(round_rx.for_each(move |signed| { + let net2 = net.clone(); + let net = net.clone(); + let abort = abort.clone(); + let round_tx = round_tx.clone(); + let state = state.clone(); + let exit_tx = exit_tx.clone(); + + async move { + if state.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 0 { + // the first message we receive should be a prevote from alice. + let prevote = match signed.message { + finality_grandpa::Message::Prevote(prevote) => prevote, + _ => panic!("voter should prevote."), + }; + + // its chain has 20 blocks and the voter targets 3/4 of the + // unfinalized chain, so the vote should be for block 15 + assert_eq!(prevote.target_number, 15); + + // we push 20 more blocks to alice's chain + net.lock().peer(0).push_blocks(20, false); + + let interval = + futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| { + Box::pin(async move { + delay.await; + Some(((), Delay::new(Duration::from_millis(200)))) + }) + }); + + interval + .take_while(move |_| { + future::ready(net2.lock().peer(1).client().info().best_number != 40) + }) + .for_each(|_| future::ready(())) + .await; + + let block_30_hash = + net.lock().peer(0).client().as_client().hash(30).unwrap().unwrap(); + + // we restart alice's voter + abort.abort(); + tokio::spawn(alice_voter2(peers, net.clone())); + + // and we push our own prevote for block 30 + let prevote = + finality_grandpa::Prevote { target_number: 30, target_hash: block_30_hash }; + + // One should either be calling `Sink::send` or `Sink::start_send` followed + // by `Sink::poll_complete` to make sure items are being flushed. Given that + // we send in a loop including a delay until items are received, this can be + // ignored for the sake of reduced complexity. 
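For readers unfamiliar with the `Sink` contract mentioned in the comment above: `start_send` only enqueues an item, and a well-behaved sender normally flushes afterwards, or uses the `SinkExt::send` combinator, which does both (`poll_complete` from the old futures 0.1 API corresponds to `flush`/`poll_flush` in futures 0.3). A small self-contained sketch with a `futures` mpsc channel:

```rust
use futures::{channel::mpsc, future, SinkExt, StreamExt};

async fn send_and_flush() {
    let (mut tx, mut rx) = mpsc::channel::<u32>(8);

    // Preferred: `send` drives readiness, `start_send` and the flush in one await.
    tx.send(1).await.unwrap();

    // Lower-level equivalent: wait for capacity, enqueue, then flush explicitly.
    future::poll_fn(|cx| tx.poll_ready_unpin(cx)).await.unwrap();
    tx.start_send_unpin(2).unwrap();
    tx.flush().await.unwrap();

    assert_eq!(rx.next().await, Some(1));
    assert_eq!(rx.next().await, Some(2));
}
```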
+ Pin::new(&mut *round_tx.lock()) + .start_send(finality_grandpa::Message::Prevote(prevote)) + .unwrap(); + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 1 + { + // the next message we receive should be our own prevote + let prevote = match signed.message { + finality_grandpa::Message::Prevote(prevote) => prevote, + _ => panic!("We should receive our own prevote."), + }; + + // targeting block 30 + assert!(prevote.target_number == 30); + + // after alice restarts it should send its previous prevote + // therefore we won't ever receive it again since it will be a + // known message on the gossip layer + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 2 + { + // we then receive a precommit from alice for block 15 + // even though we casted a prevote for block 30 + let precommit = match signed.message { + finality_grandpa::Message::Precommit(precommit) => precommit, + _ => panic!("voter should precommit."), + }; + + assert!(precommit.target_number == 15); + + // signal exit + exit_tx.clone().lock().take().unwrap().send(()).unwrap(); + } else { + panic!() + } + } + })); + } + + run_until_complete(exit_rx.into_future(), &net).await; +} + +#[tokio::test] +async fn finalize_3_voters_1_light_observer() { + sp_tracing::try_init_simple(); + let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + let voters = make_ids(authorities); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); + let voters = initialize_grandpa(&mut net, authorities); + let observer = observer::run_grandpa_observer( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore: None, + name: Some("observer".to_string()), + local_role: Role::Full, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + net.peers[3].data.lock().take().expect("link initialized at startup; qed"), + net.peers[3].network_service().clone(), + net.peers[3].sync_service().clone(), + ) + .unwrap(); + net.peer(0).push_blocks(20, false); + net.run_until_sync().await; + + for i in 0..4 { + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); + } + + let net = Arc::new(Mutex::new(net)); + + tokio::spawn(voters); + tokio::spawn(observer); + run_to_completion(20, net.clone(), authorities).await; +} + +#[tokio::test] +async fn voter_catches_up_to_latest_round_when_behind() { + sp_tracing::try_init_simple(); + + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers); + + let net = GrandpaTestNet::new(TestApi::new(voters), 2, 0); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + let voter = |keystore, + peer_id, + link, + net: Arc>| + -> Pin + Send>> { + let mut net = net.lock(); + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore, + name: Some(format!("peer#{}", peer_id)), + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }, + link, + network: net.peer(peer_id).network_service().clone(), + sync: net.peer(peer_id).sync_service().clone(), + voting_rule: (), + prometheus_registry: None, + shared_voter_state: SharedVoterState::empty(), + telemetry: None, + }; + + Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) + }; + + // 
spawn authorities + for (peer_id, key) in peers.iter().enumerate() { + let (client, link) = { + let net = net.lock(); + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) + }; + + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &50)) + .for_each(move |_| future::ready(())), + ); + + let keystore = create_keystore(*key); + + let voter = voter(Some(keystore), peer_id, link, net.clone()); + + tokio::spawn(voter); + } + + net.lock().peer(0).push_blocks(50, false); + net.lock().run_until_sync().await; + + // wait for them to finalize block 50. since they'll vote on 3/4 of the + // unfinalized chain it will take at least 4 rounds to do it. + let wait_for_finality = ::futures::future::join_all(finality_notifications); + + // spawn a new voter, it should be behind by at least 4 rounds and should be + // able to catch up to the latest round + let test = { + let net = net.clone(); + + wait_for_finality.then(move |_| { + net.lock().add_authority_peer(); + + let link = { + let net = net.lock(); + let mut link = net.peers[2].data.lock(); + link.take().expect("link initialized at startup; qed") + }; + let set_state = link.persistent_data.set_state.clone(); + tokio::spawn(voter(None, 2, link, net.clone())); + + let start_time = std::time::Instant::now(); + let timeout = Duration::from_secs(5 * 60); + let wait_for_catch_up = futures::future::poll_fn(move |_| { + // The voter will start at round 1 and since everyone else is + // already at a later round the only way to get to round 4 (or + // later) is by issuing a catch up request. + if set_state.read().last_completed_round().number >= 4 { + Poll::Ready(()) + } else if start_time.elapsed() > timeout { + panic!("Timed out while waiting for catch up to happen") + } else { + Poll::Pending + } + }); + + wait_for_catch_up + }) + }; + + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + future::select(test, drive_to_completion).await; +} + +type TestEnvironment = + Environment; + +fn test_environment_with_select_chain( + link: &TestLinkHalf, + keystore: Option, + network_service: N, + sync_service: S, + select_chain: SC, + voting_rule: VR, +) -> TestEnvironment +where + N: NetworkT, + S: SyncingT, + VR: VotingRule, +{ + let PersistentData { ref authority_set, ref set_state, .. 
} = link.persistent_data; + + let config = Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_generation_period: 32, + keystore, + name: None, + local_role: Role::Authority, + observer_enabled: true, + telemetry: None, + protocol_name: grandpa_protocol_name::NAME.into(), + }; + + let network = NetworkBridge::new( + network_service.clone(), + sync_service, + config.clone(), + set_state.clone(), + None, + None, + ); + + Environment { + authority_set: authority_set.clone(), + config: config.clone(), + client: link.client.clone(), + select_chain, + set_id: authority_set.set_id(), + voter_set_state: set_state.clone(), + voters: Arc::new(authority_set.current_authorities()), + network, + voting_rule, + metrics: None, + justification_sender: None, + telemetry: None, + _phantom: PhantomData, + } +} + +fn test_environment( + link: &TestLinkHalf, + keystore: Option, + network_service: N, + sync_service: S, + voting_rule: VR, +) -> TestEnvironment, VR> +where + N: NetworkT, + S: SyncingT, + VR: VotingRule, +{ + test_environment_with_select_chain( + link, + keystore, + network_service, + sync_service, + link.select_chain.clone(), + voting_rule, + ) +} + +#[tokio::test] +async fn grandpa_environment_respects_voting_rules() { + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let link = peer.data.lock().take().unwrap(); + + // add 21 blocks + let hashes = peer.push_blocks(21, false); + + // create an environment with no voting rule restrictions + let unrestricted_env = + test_environment(&link, None, network_service.clone(), sync_service.clone(), ()); + + // another with 3/4 unfinalized chain voting rule restriction + let three_quarters_env = test_environment( + &link, + None, + network_service.clone(), + sync_service.clone(), + voting_rule::ThreeQuartersOfTheUnfinalizedChain, + ); + + // and another restricted with the default voting rules: i.e. 
3/4 rule and + // always below best block + let default_env = test_environment( + &link, + None, + network_service.clone(), + sync_service, + VotingRulesBuilder::default().build(), + ); + + // the unrestricted environment should just return the best block + assert_eq!( + unrestricted_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 21, + ); + + // both the other environments should return block 16, which is 3/4 of the + // way in the unfinalized chain + assert_eq!( + three_quarters_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 16, + ); + + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 16, + ); + + // we finalize block 19 with block 21 being the best block + peer.client().finalize_block(hashes[18], None, false).unwrap(); + + // the 3/4 environment should propose block 21 for voting + assert_eq!( + three_quarters_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 21, + ); + + // while the default environment will always still make sure we don't vote + // on the best block (2 behind) + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 19, + ); + + // we finalize block 21 with block 21 being the best block + let hashof21 = hashes[20]; + peer.client().finalize_block(hashof21, None, false).unwrap(); + + // even though the default environment will always try to not vote on the + // best block, there's a hard rule that we can't cast any votes lower than + // the given base (#21). + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 21, + ); +} + +#[tokio::test] +async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { + // NOTE: this is a "regression" test since initially we were not passing the + // best block to the voting rules + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let link = peer.data.lock().take().unwrap(); + let client = peer.client().as_client().clone(); + let select_chain = MockSelectChain::default(); + + // add 42 blocks + peer.push_blocks(42, false); + + // create an environment with a voting rule that always restricts votes to + // before the best block by 5 blocks + let env = test_environment_with_select_chain( + &link, + None, + network_service.clone(), + sync_service, + select_chain.clone(), + voting_rule::BeforeBestBlockBy(5), + ); + + // both best block and finality target are pointing to the same latest block, + // therefore we must restrict our vote on top of the given target (#21) + let hashof21 = client.expect_block_hash_from_id(&BlockId::Number(21)).unwrap(); + select_chain.set_best_chain(client.expect_header(hashof21).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof21).unwrap().hash()); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 16, + ); + + // the returned finality target is already 11 blocks from the best block, + // therefore there should be no further restriction by the 
voting rule + let hashof10 = client.expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); + select_chain.set_best_chain(client.expect_header(hashof21).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof10).unwrap().hash()); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .1, + 10, + ); +} + +#[tokio::test] +async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_target() { + sp_tracing::try_init_simple(); + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let link = peer.data.lock().take().unwrap(); + let client = peer.client().as_client().clone(); + let select_chain = MockSelectChain::default(); + let voting_rule = AssertBestBlock::default(); + let env = test_environment_with_select_chain( + &link, + None, + network_service.clone(), + sync_service.clone(), + select_chain.clone(), + voting_rule.clone(), + ); + + // create a chain that is 10 blocks long + peer.push_blocks(10, false); + + let hashof5_a = client.expect_block_hash_from_id(&BlockId::Number(5)).unwrap(); + let hashof10_a = client.expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); + + // create a fork starting at block 4 that is 6 blocks long + let fork = peer.generate_blocks_at( + BlockId::Number(4), + 6, + BlockOrigin::File, + |mut builder| { + builder.push_deposit_log_digest_item(DigestItem::Other(vec![1])).unwrap(); + builder.build().unwrap().block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ); + + let hashof5_b = *fork.first().unwrap(); + let hashof10_b = *fork.last().unwrap(); + + // returning a finality target that's higher than the best block is inconsistent, + // therefore the best block should be set to be the same block as the target + select_chain.set_best_chain(client.expect_header(hashof5_a).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof10_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof10_a); + + // the voting rule will internally assert that the best block that was passed was `hashof10_a`, + // instead of the one returned by `SelectChain` + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof10_a, + ); + + // best block and finality target are blocks at the same height but on different forks, + // we should override the initial best block (#5B) with the target block (#5A) + select_chain.set_best_chain(client.expect_header(hashof5_b).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof5_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof5_a, + ); + + // best block is higher than finality target but it's on a different fork, + // we should override the initial best block (#5A) with the target block (#5B) + select_chain.set_best_chain(client.expect_header(hashof10_b).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof5_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + 
hashof5_a, + ); + + // best block is higher than finality target and it's on the same fork, + // the best block passed to the voting rule should not be overriden + select_chain.set_best_chain(client.expect_header(hashof10_a).unwrap()); + select_chain.set_finality_target(client.expect_header(hashof5_a).unwrap().hash()); + voting_rule.set_expected_best_block(hashof10_a); + + assert_eq!( + env.best_chain_containing(peer.client().info().finalized_hash) + .await + .unwrap() + .unwrap() + .0, + hashof5_a, + ); +} + +#[tokio::test] +async fn grandpa_environment_never_overwrites_round_voter_state() { + use finality_grandpa::voter::Environment; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let link = peer.data.lock().take().unwrap(); + + let keystore = create_keystore(peers[0]); + let environment = + test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()); + + let round_state = || finality_grandpa::round::State::genesis(Default::default()); + let base = || Default::default(); + let historical_votes = || finality_grandpa::HistoricalVotes::new(); + + let get_current_round = |n| { + let current_rounds = environment + .voter_set_state + .read() + .with_current_round(n) + .map(|(_, current_rounds)| current_rounds.clone()) + .ok()?; + + Some(current_rounds.get(&n).unwrap().clone()) + }; + + // round 2 should not be tracked + assert_eq!(get_current_round(2), None); + + // after completing round 1 we should start tracking round 2 + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); + + assert_eq!(get_current_round(2).unwrap(), HasVoted::No); + + // we need to call `round_data` for the next round to pick up + // from the keystore which authority id we'll be using to vote + environment.round_data(2); + + let info = peer.client().info(); + + let prevote = + finality_grandpa::Prevote { target_hash: info.best_hash, target_number: info.best_number }; + + // we prevote for round 2 which should lead to us updating the voter state + environment.prevoted(2, prevote.clone()).unwrap(); + + let has_voted = get_current_round(2).unwrap(); + + assert_matches!(has_voted, HasVoted::Yes(_, _)); + assert_eq!(*has_voted.prevote().unwrap(), prevote); + + // if we report round 1 as completed again we should not overwrite the + // voter state for round 2 + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); + + assert_matches!(get_current_round(2).unwrap(), HasVoted::Yes(_, _)); +} + +#[tokio::test] +async fn justification_with_equivocation() { + use sp_application_crypto::Pair; + + // we have 100 authorities + let pairs = (0..100).map(|n| AuthorityPair::from_seed(&[n; 32])).collect::>(); + let voters = pairs.iter().map(AuthorityPair::public).map(|id| (id, 1)).collect::>(); + let api = TestApi::new(voters.clone()); + let mut net = GrandpaTestNet::new(api.clone(), 1, 0); + + // we create a basic chain with 3 blocks (no forks) + net.peer(0).push_blocks(3, false); + + let client = net.peer(0).client().as_client().clone(); + let hashof1 = client.expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); + let hashof2 = client.expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); + let hashof3 = client.expect_block_hash_from_id(&BlockId::Number(3)).unwrap(); + let block1 = client.expect_header(hashof1).unwrap(); + let block2 = 
client.expect_header(hashof2).unwrap(); + let block3 = client.expect_header(hashof3).unwrap(); + + let set_id = 0; + let justification = { + let round = 1; + + let make_precommit = |target_hash, target_number, pair: &AuthorityPair| { + let precommit = finality_grandpa::Precommit { target_hash, target_number }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_consensus_grandpa::localized_payload(round, set_id, &msg); + + let precommit = finality_grandpa::SignedPrecommit { + precommit: precommit.clone(), + signature: pair.sign(&encoded[..]), + id: pair.public(), + }; + + precommit + }; + + let mut precommits = Vec::new(); + + // we have 66/100 votes for block #3 and therefore do not have threshold to finalize + for pair in pairs.iter().take(66) { + let precommit = make_precommit(block3.hash(), *block3.number(), pair); + precommits.push(precommit); + } + + // we create an equivocation for the 67th validator targetting blocks #1 and #2. + // this should be accounted as "voting for all blocks" and therefore block #3 will + // have 67/100 votes, reaching finality threshold. + { + precommits.push(make_precommit(block1.hash(), *block1.number(), &pairs[66])); + precommits.push(make_precommit(block2.hash(), *block2.number(), &pairs[66])); + } + + let commit = finality_grandpa::Commit { + target_hash: block3.hash(), + target_number: *block3.number(), + precommits, + }; + + GrandpaJustification::from_commit(&client, round, commit).unwrap() + }; + + // the justification should include the minimal necessary vote ancestry and + // the commit should be valid + assert!(justification.verify(set_id, &voters).is_ok()); +} + +#[tokio::test] +async fn imports_justification_for_regular_blocks_on_import() { + // NOTE: this is a regression test since initially we would only import + // justifications for authority change blocks, and would discard any + // existing justification otherwise. + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 1, 0); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); + let full_client = client.as_client(); + + // create a new block (without importing it) + let generate_block = |parent| { + let builder = full_client.new_block_at(parent, Default::default(), false).unwrap(); + builder.build().unwrap().block + }; + + // create a valid justification, with one precommit targeting the block + let make_justification = |round, hash, number| { + let set_id = 0; + + let precommit = finality_grandpa::Precommit { target_hash: hash, target_number: number }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_consensus_grandpa::localized_payload(round, set_id, &msg); + let signature = peers[0].sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: peers[0].public().into(), + }; + + let commit = finality_grandpa::Commit { + target_hash: hash, + target_number: number, + precommits: vec![precommit], + }; + + GrandpaJustification::from_commit(&full_client, round, commit).unwrap() + }; + + let mut generate_and_import_block_with_justification = |parent| { + // we import the block with justification attached + let block = generate_block(parent); + let block_hash = block.hash(); + let justification = make_justification(1, block_hash, *block.header.number()); + + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justifications = Some((GRANDPA_ENGINE_ID, justification.encode()).into()); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + assert_eq!( + // NOTE: we use `block_on` here because async closures are + // unsupported and it doesn't matter if we block in a test + futures::executor::block_on(block_import.import_block(import)).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: false, + clear_justification_requests: false, + bad_justification: false, + is_new_best: true, + ..Default::default() + }), + ); + + block_hash + }; + + let block1 = + generate_and_import_block_with_justification(full_client.chain_info().genesis_hash); + + // the justification should be imported and available from the client + assert!(client.justifications(block1).unwrap().is_some()); + + // subsequent justifications should be ignored and not imported + let mut parent = block1; + for _ in 2..JUSTIFICATION_IMPORT_PERIOD { + parent = generate_and_import_block_with_justification(parent); + assert!(client.justifications(parent).unwrap().is_none()); + } + + let block32 = generate_and_import_block_with_justification(parent); + + // until we reach a block in the next justification import period, at + // which point we should import it + assert!(client.justifications(block32).unwrap().is_some()); +} + +#[tokio::test] +async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { + use finality_grandpa::voter::Environment; + + let alice = Ed25519Keyring::Alice; + let voters = make_ids(&[alice]); + + let environment = { + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let link = peer.data.lock().take().unwrap(); + let keystore = create_keystore(alice); + test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()) + }; + + let signed_prevote = { + let prevote = finality_grandpa::Prevote { target_hash: H256::random(), target_number: 1 }; + + let signed = alice.sign(&[]).into(); + 
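+ // NOTE: the signature is over an empty payload, so it is not a valid vote signature;
+ // that's fine here because the assertions below only exercise the "is the offender one
+ // of our local keys" check, not signature verification.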
(prevote, signed) + }; + + let mut equivocation = finality_grandpa::Equivocation { + round_number: 1, + identity: alice.public().into(), + first: signed_prevote.clone(), + second: signed_prevote.clone(), + }; + + // we need to call `round_data` to pick up from the keystore which + // authority id we'll be using to vote + environment.round_data(1); + + // reporting the equivocation should fail since the offender is a local + // authority (i.e. we have keys in our keystore for the given id) + let equivocation_proof = sp_consensus_grandpa::Equivocation::Prevote(equivocation.clone()); + assert!(matches!(environment.report_equivocation(equivocation_proof), Err(Error::Safety(_)))); + + // if we set the equivocation offender to another id for which we don't have + // keys it should work + equivocation.identity = TryFrom::try_from(&[1; 32][..]).unwrap(); + let equivocation_proof = sp_consensus_grandpa::Equivocation::Prevote(equivocation); + assert!(environment.report_equivocation(equivocation_proof).is_ok()); +} + +#[tokio::test] +async fn revert_prunes_authority_changes() { + sp_tracing::try_init_simple(); + + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; + + type TestBlockBuilder<'a> = + BlockBuilder<'a, Block, PeersFullClient, substrate_test_runtime_client::Backend>; + let edit_block = |mut builder: TestBlockBuilder| { + add_scheduled_change( + &mut builder, + ScheduledChange { next_authorities: make_ids(peers), delay: 0 }, + ); + builder.build().unwrap().block + }; + + let api = TestApi::new(make_ids(peers)); + + let mut net = GrandpaTestNet::new(api, 3, 0); + tokio::spawn(initialize_grandpa(&mut net, peers)); + + let peer = net.peer(0); + let client = peer.client().as_client(); + + // Test scenario: (X) = auth-change, 24 = revert-point + // + // +---------(27) + // / + // 0---(21)---23---24---25---(28)---30 + // ^ \ + // revert-point +------(29) + + // Construct canonical chain + + // add 20 blocks + peer.push_blocks(20, false); + // at block 21 we add an authority transition + peer.generate_blocks(1, BlockOrigin::File, edit_block); + // add more blocks on top of it (until we have 24) + peer.push_blocks(3, false); + // add more blocks on top of it (until we have 27) + peer.push_blocks(3, false); + // at block 28 we add an authority transition + peer.generate_blocks(1, BlockOrigin::File, edit_block); + // add more blocks on top of it (until we have 30) + peer.push_blocks(2, false); + + // Fork before revert point + + // add more blocks on top of block 23 (until we have 26) + let hash = peer + .generate_blocks_at( + BlockId::Number(23), + 3, + BlockOrigin::File, + |mut builder| { + builder.push_deposit_log_digest_item(DigestItem::Other(vec![1])).unwrap(); + builder.build().unwrap().block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); + // at block 27 of the fork add an authority transition + peer.generate_blocks_at( + BlockId::Hash(hash), + 1, + BlockOrigin::File, + edit_block, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ); + + // Fork after revert point + + // add more block on top of block 25 (until we have 28) + let hash = peer + .generate_blocks_at( + BlockId::Number(25), + 3, + BlockOrigin::File, + |mut builder| { + builder.push_deposit_log_digest_item(DigestItem::Other(vec![2])).unwrap(); + builder.build().unwrap().block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ) + .pop() + .unwrap(); + // at block 29 of the fork add an authority transition + 
peer.generate_blocks_at( + BlockId::Hash(hash), + 1, + BlockOrigin::File, + edit_block, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ); + + revert(client.clone(), 6).unwrap(); + + let persistent_data: PersistentData = aux_schema::load_persistent( + &*client, + client.info().genesis_hash, + Zero::zero(), + || unreachable!(), + ) + .unwrap(); + let changes_num: Vec<_> = persistent_data + .authority_set + .inner() + .pending_standard_changes + .iter() + .map(|(_, n, _)| *n) + .collect(); + assert_eq!(changes_num, [21, 27]); +} diff --git a/substrate/client/consensus/grandpa/src/until_imported.rs b/substrate/client/consensus/grandpa/src/until_imported.rs new file mode 100644 index 00000000..14f32ecc --- /dev/null +++ b/substrate/client/consensus/grandpa/src/until_imported.rs @@ -0,0 +1,1041 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper stream for waiting until one or more blocks are imported before +//! passing through inner items. This is done in a generic way to support +//! many different kinds of items. +//! +//! This is used for votes and commit messages currently. + +use super::{ + BlockStatus as BlockStatusT, BlockSyncRequester as BlockSyncRequesterT, CommunicationIn, Error, + SignedMessage, LOG_TARGET, +}; + +use finality_grandpa::voter; +use futures::{ + prelude::*, + stream::{Fuse, StreamExt}, +}; +use futures_timer::Delay; +use log::{debug, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; +use sc_client_api::{BlockImportNotification, ImportNotifications}; +use sc_utils::mpsc::TracingUnboundedReceiver; +use sp_consensus_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; + +use std::{ + collections::{HashMap, VecDeque}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::{Duration, Instant}, +}; + +const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); + +/// Something that needs to be withheld until specific blocks are available. +/// +/// For example a GRANDPA commit message which is not of any use without the corresponding block +/// that it commits on. +pub(crate) trait BlockUntilImported: Sized { + /// The type that is blocked on. + type Blocked; + + /// Check if a new incoming item needs awaiting until a block(s) is imported. + fn needs_waiting>( + input: Self::Blocked, + status_check: &S, + ) -> Result, Error>; + + /// called when the wait has completed. The canonical number is passed through + /// for further checks. + fn wait_completed(self, canon_number: NumberFor) -> Option; +} + +/// Describes whether a given [`BlockUntilImported`] (a) should be discarded, (b) is waiting for +/// specific blocks to be imported or (c) is ready to be used. 
+/// +/// A reason for discarding a [`BlockUntilImported`] would be if a referenced block is perceived +/// under a different number than specified in the message. +pub(crate) enum DiscardWaitOrReady { + Discard, + Wait(Vec<(Block::Hash, NumberFor, W)>), + Ready(R), +} + +/// Prometheus metrics for the `UntilImported` queue. +// At a given point in time there can be more than one `UntilImported` queue. One can not register a +// metric twice, thus queues need to share the same Prometheus metrics instead of instantiating +// their own ones. +// +// When a queue is dropped it might still contain messages. In order for those to not distort the +// Prometheus metrics, the `Metric` struct cleans up after itself within its `Drop` implementation +// by subtracting the local_waiting_messages (the amount of messages left in the queue about to +// be dropped) from the global_waiting_messages gauge. +pub(crate) struct Metrics { + global_waiting_messages: Gauge, + local_waiting_messages: u64, +} + +impl Metrics { + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + global_waiting_messages: register( + Gauge::new( + "substrate_finality_grandpa_until_imported_waiting_messages_number", + "Number of finality grandpa messages waiting within the until imported queue.", + )?, + registry, + )?, + local_waiting_messages: 0, + }) + } + + fn waiting_messages_inc(&mut self) { + self.local_waiting_messages += 1; + self.global_waiting_messages.inc(); + } + + fn waiting_messages_dec(&mut self) { + self.local_waiting_messages -= 1; + self.global_waiting_messages.dec(); + } +} + +impl Clone for Metrics { + fn clone(&self) -> Self { + Metrics { + global_waiting_messages: self.global_waiting_messages.clone(), + // When cloned, reset local_waiting_messages, so the global counter is not reduced a + // second time for the same messages on `drop` of the clone. + local_waiting_messages: 0, + } + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + // Reduce the global counter by the amount of messages that were still left in the dropped + // queue. + self.global_waiting_messages.sub(self.local_waiting_messages) + } +} + +/// Buffering incoming messages until blocks with given hashes are imported. +pub(crate) struct UntilImported +where + Block: BlockT, + I: Stream + Unpin, + M: BlockUntilImported, +{ + import_notifications: Fuse>>, + block_sync_requester: BlockSyncRequester, + status_check: BlockStatus, + incoming_messages: Fuse, + ready: VecDeque, + /// Interval at which to check status of each awaited block. + check_pending: Pin> + Send>>, + /// Mapping block hashes to their block number, the point in time it was + /// first encountered (Instant) and a list of GRANDPA messages referencing + /// the block hash. + pending: HashMap, Instant, Vec)>, + + /// Queue identifier for differentiation in logs. + identifier: &'static str, + /// Prometheus metrics. + metrics: Option, +} + +impl Unpin + for UntilImported +where + Block: BlockT, + I: Stream + Unpin, + M: BlockUntilImported, +{ +} + +impl + UntilImported +where + Block: BlockT, + BlockStatus: BlockStatusT, + BlockSyncRequester: BlockSyncRequesterT, + I: Stream + Unpin, + M: BlockUntilImported, +{ + /// Create a new `UntilImported` wrapper. 
+ pub(crate) fn new( + import_notifications: ImportNotifications, + block_sync_requester: BlockSyncRequester, + status_check: BlockStatus, + incoming_messages: I, + identifier: &'static str, + metrics: Option, + ) -> Self { + // how often to check if pending messages that are waiting for blocks to be + // imported can be checked. + // + // the import notifications interval takes care of most of this; this is + // used in the event of missed import notifications + const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); + + let check_pending = futures::stream::unfold(Delay::new(CHECK_PENDING_INTERVAL), |delay| { + Box::pin(async move { + delay.await; + Some((Ok(()), Delay::new(CHECK_PENDING_INTERVAL))) + }) + }); + + UntilImported { + import_notifications: import_notifications.fuse(), + block_sync_requester, + status_check, + incoming_messages: incoming_messages.fuse(), + ready: VecDeque::new(), + check_pending: Box::pin(check_pending), + pending: HashMap::new(), + identifier, + metrics, + } + } +} + +impl Stream + for UntilImported +where + Block: BlockT, + BStatus: BlockStatusT, + BSyncRequester: BlockSyncRequesterT, + I: Stream + Unpin, + M: BlockUntilImported, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // We are using a `this` variable in order to allow multiple simultaneous mutable borrow to + // `self`. + let this = &mut *self; + + loop { + match StreamExt::poll_next_unpin(&mut this.incoming_messages, cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(input)) => { + // new input: schedule wait of any parts which require + // blocks to be known. + match M::needs_waiting(input, &this.status_check)? { + DiscardWaitOrReady::Discard => {}, + DiscardWaitOrReady::Wait(items) => { + for (target_hash, target_number, wait) in items { + this.pending + .entry(target_hash) + .or_insert_with(|| (target_number, Instant::now(), Vec::new())) + .2 + .push(wait) + } + }, + DiscardWaitOrReady::Ready(item) => this.ready.push_back(item), + } + + if let Some(metrics) = &mut this.metrics { + metrics.waiting_messages_inc(); + } + }, + Poll::Pending => break, + } + } + + loop { + match StreamExt::poll_next_unpin(&mut this.import_notifications, cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(notification)) => { + // new block imported. queue up all messages tied to that hash. + if let Some((_, _, messages)) = this.pending.remove(¬ification.hash) { + let canon_number = *notification.header.number(); + let ready_messages = + messages.into_iter().filter_map(|m| m.wait_completed(canon_number)); + + this.ready.extend(ready_messages); + } + }, + Poll::Pending => break, + } + } + + let mut update_interval = false; + while let Poll::Ready(Some(Ok(()))) = this.check_pending.poll_next_unpin(cx) { + update_interval = true; + } + + if update_interval { + let mut known_keys = Vec::new(); + for (&block_hash, &mut (block_number, ref mut last_log, ref v)) in + this.pending.iter_mut() + { + if let Some(number) = this.status_check.block_number(block_hash)? { + known_keys.push((block_hash, number)); + } else { + let next_log = *last_log + LOG_PENDING_INTERVAL; + if Instant::now() >= next_log { + debug!( + target: LOG_TARGET, + "Waiting to import block {} before {} {} messages can be imported. \ + Requesting network sync service to retrieve block from. 
\ + Possible fork?", + block_hash, + v.len(), + this.identifier, + ); + + // NOTE: when sending an empty vec of peers the + // underlying should make a best effort to sync the + // block from any peers it knows about. + this.block_sync_requester.set_sync_fork_request( + vec![], + block_hash, + block_number, + ); + + *last_log = next_log; + } + } + } + + for (known_hash, canon_number) in known_keys { + if let Some((_, _, pending_messages)) = this.pending.remove(&known_hash) { + let ready_messages = + pending_messages.into_iter().filter_map(|m| m.wait_completed(canon_number)); + + this.ready.extend(ready_messages); + } + } + } + + if let Some(ready) = this.ready.pop_front() { + if let Some(metrics) = &mut this.metrics { + metrics.waiting_messages_dec(); + } + return Poll::Ready(Some(Ok(ready))) + } + + if this.import_notifications.is_done() && this.incoming_messages.is_done() { + Poll::Ready(None) + } else { + Poll::Pending + } + } +} + +fn warn_authority_wrong_target(hash: H, id: AuthorityId) { + warn!( + target: LOG_TARGET, + "Authority {:?} signed GRANDPA message with \ + wrong block number for hash {}", + id, + hash, + ); +} + +impl BlockUntilImported for SignedMessage { + type Blocked = Self; + + fn needs_waiting>( + msg: Self::Blocked, + status_check: &BlockStatus, + ) -> Result, Error> { + let (&target_hash, target_number) = msg.target(); + + if let Some(number) = status_check.block_number(target_hash)? { + if number != target_number { + warn_authority_wrong_target(target_hash, msg.id); + return Ok(DiscardWaitOrReady::Discard) + } else { + return Ok(DiscardWaitOrReady::Ready(msg)) + } + } + + Ok(DiscardWaitOrReady::Wait(vec![(target_hash, target_number, msg)])) + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + let (&target_hash, target_number) = self.target(); + if canon_number != target_number { + warn_authority_wrong_target(target_hash, self.id); + + None + } else { + Some(self) + } + } +} + +/// Helper type definition for the stream which waits until vote targets for +/// signed messages are imported. +pub(crate) type UntilVoteTargetImported = UntilImported< + Block, + BlockStatus, + BlockSyncRequester, + I, + SignedMessage<::Header>, +>; + +/// This blocks a global message import, i.e. a commit or catch up messages, +/// until all blocks referenced in its votes are known. +/// +/// This is used for compact commits and catch up messages which have already +/// been checked for structural soundness (e.g. valid signatures). +/// +/// We use the `Arc`'s reference count to implicitly count the number of outstanding blocks that we +/// are waiting on for the same message (i.e. other `BlockGlobalMessage` instances with the same +/// `inner`). +pub(crate) struct BlockGlobalMessage { + inner: Arc>>>, + target_number: NumberFor, +} + +impl Unpin for BlockGlobalMessage {} + +impl BlockUntilImported for BlockGlobalMessage { + type Blocked = CommunicationIn; + + fn needs_waiting>( + input: Self::Blocked, + status_check: &BlockStatus, + ) -> Result, Error> { + use std::collections::hash_map::Entry; + + enum KnownOrUnknown { + Known(N), + Unknown(N), + } + + impl KnownOrUnknown { + fn number(&self) -> &N { + match *self { + KnownOrUnknown::Known(ref n) => n, + KnownOrUnknown::Unknown(ref n) => n, + } + } + } + + let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); + + { + // returns false when should early exit. + let mut query_known = |target_hash, perceived_number| -> Result { + // check integrity: all votes for same hash have same number. 
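+ // e.g. a commit whose precommits reference the same hash once as #5 and once as #6 is
+ // internally inconsistent: `query_known` returns `false` for the second vote and the
+ // whole global message is discarded.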
+ let canon_number = match checked_hashes.entry(target_hash) { + Entry::Occupied(entry) => *entry.get().number(), + Entry::Vacant(entry) => { + if let Some(number) = status_check.block_number(target_hash)? { + entry.insert(KnownOrUnknown::Known(number)); + number + } else { + entry.insert(KnownOrUnknown::Unknown(perceived_number)); + perceived_number + } + }, + }; + + if canon_number != perceived_number { + // invalid global message: messages targeting wrong number + // or at least different from other vote in same global + // message. + return Ok(false) + } + + Ok(true) + }; + + match input { + voter::CommunicationIn::Commit(_, ref commit, ..) => { + // add known hashes from all precommits. + let precommit_targets = + commit.precommits.iter().map(|c| (c.target_number, c.target_hash)); + + for (target_number, target_hash) in precommit_targets { + if !query_known(target_hash, target_number)? { + return Ok(DiscardWaitOrReady::Discard) + } + } + }, + voter::CommunicationIn::CatchUp(ref catch_up, ..) => { + // add known hashes from all prevotes and precommits. + let prevote_targets = catch_up + .prevotes + .iter() + .map(|s| (s.prevote.target_number, s.prevote.target_hash)); + + let precommit_targets = catch_up + .precommits + .iter() + .map(|s| (s.precommit.target_number, s.precommit.target_hash)); + + let targets = prevote_targets.chain(precommit_targets); + + for (target_number, target_hash) in targets { + if !query_known(target_hash, target_number)? { + return Ok(DiscardWaitOrReady::Discard) + } + } + }, + }; + } + + let unknown_hashes = checked_hashes + .into_iter() + .filter_map(|(hash, num)| match num { + KnownOrUnknown::Unknown(number) => Some((hash, number)), + KnownOrUnknown::Known(_) => None, + }) + .collect::>(); + + if unknown_hashes.is_empty() { + // none of the hashes in the global message were unknown. + // we can just return the message directly. + return Ok(DiscardWaitOrReady::Ready(input)) + } + + let locked_global = Arc::new(Mutex::new(Some(input))); + + let items_to_await = unknown_hashes + .into_iter() + .map(|(hash, target_number)| { + ( + hash, + target_number, + BlockGlobalMessage { inner: locked_global.clone(), target_number }, + ) + }) + .collect(); + + // schedule waits for all unknown messages. + // when the last one of these has `wait_completed` called on it, + // the global message will be returned. + Ok(DiscardWaitOrReady::Wait(items_to_await)) + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + if self.target_number != canon_number { + // Delete the inner message so it won't ever be forwarded. Future calls to + // `wait_completed` on the same `inner` will ignore it. + *self.inner.lock() = None; + return None + } + + match Arc::try_unwrap(self.inner) { + // This is the last reference and thus the last outstanding block to be awaited. `inner` + // is either `Some(_)` or `None`. The latter implies that a previous `wait_completed` + // call witnessed a block number mismatch (see above). + Ok(inner) => Mutex::into_inner(inner), + // There are still other strong references to this `Arc`, thus the message is blocked on + // other blocks to be imported. + Err(_) => None, + } + } +} + +/// A stream which gates off incoming global messages, i.e. commit and catch up +/// messages, until all referenced block hashes have been imported. 
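+//
+// In contrast to `UntilVoteTargetImported` above, which holds back a single signed vote until
+// its one target block is known, this holds back a whole commit or catch-up (wrapped in
+// `BlockGlobalMessage`) until every block hash referenced by any of its votes is known.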
+pub(crate) type UntilGlobalMessageBlocksImported = + UntilImported>; + +#[cfg(test)] +mod tests { + use super::*; + use crate::{CatchUp, CompactCommit}; + use finality_grandpa::Precommit; + use futures::future::Either; + use futures_timer::Delay; + use sc_client_api::BlockImportNotification; + use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; + use sp_consensus::BlockOrigin; + use sp_core::crypto::UncheckedFrom; + use substrate_test_runtime_client::runtime::{Block, Hash, Header}; + + #[derive(Clone)] + struct TestChainState { + sender: TracingUnboundedSender>, + known_blocks: Arc>>, + } + + impl TestChainState { + fn new() -> (Self, ImportNotifications) { + let (tx, rx) = tracing_unbounded("test", 100_000); + let state = + TestChainState { sender: tx, known_blocks: Arc::new(Mutex::new(HashMap::new())) }; + + (state, rx) + } + + fn block_status(&self) -> TestBlockStatus { + TestBlockStatus { inner: self.known_blocks.clone() } + } + + fn import_header(&self, header: Header) { + let hash = header.hash(); + let number = *header.number(); + let (tx, _rx) = tracing_unbounded("unpin-worker-channel", 10_000); + self.known_blocks.lock().insert(hash, number); + self.sender + .unbounded_send(BlockImportNotification::::new( + hash, + BlockOrigin::File, + header, + false, + None, + tx, + )) + .unwrap(); + } + } + + struct TestBlockStatus { + inner: Arc>>, + } + + impl BlockStatusT for TestBlockStatus { + fn block_number(&self, hash: Hash) -> Result, Error> { + Ok(self.inner.lock().get(&hash).map(|x| *x)) + } + } + + #[derive(Clone)] + struct TestBlockSyncRequester { + requests: Arc)>>>, + } + + impl Default for TestBlockSyncRequester { + fn default() -> Self { + TestBlockSyncRequester { requests: Arc::new(Mutex::new(Vec::new())) } + } + } + + impl BlockSyncRequesterT for TestBlockSyncRequester { + fn set_sync_fork_request( + &self, + _peers: Vec, + hash: Hash, + number: NumberFor, + ) { + self.requests.lock().push((hash, number)); + } + } + + fn make_header(number: u64) -> Header { + Header::new( + number, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + } + + // unwrap the commit from `CommunicationIn` returning its fields in a tuple, + // panics if the given message isn't a commit + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit
) { + match msg { + voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), + _ => panic!("expected commit"), + } + } + + // unwrap the catch up from `CommunicationIn` returning its inner representation, + // panics if the given message isn't a catch up + fn unapply_catch_up(msg: CommunicationIn) -> CatchUp
{ + match msg { + voter::CommunicationIn::CatchUp(catch_up, ..) => catch_up, + _ => panic!("expected catch up"), + } + } + + fn message_all_dependencies_satisfied( + msg: CommunicationIn, + enact_dependencies: F, + ) -> CommunicationIn + where + F: FnOnce(&TestChainState), + { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + // enact all dependencies before importing the message + enact_dependencies(&chain_state); + + let (global_tx, global_rx) = tracing_unbounded("test", 100_000); + + let until_imported = UntilGlobalMessageBlocksImported::new( + import_notifications, + TestBlockSyncRequester::default(), + block_status, + global_rx, + "global", + None, + ); + + global_tx.unbounded_send(msg).unwrap(); + + let work = until_imported.into_future(); + + futures::executor::block_on(work).0.unwrap().unwrap() + } + + fn blocking_message_on_dependencies( + msg: CommunicationIn, + enact_dependencies: F, + ) -> CommunicationIn + where + F: FnOnce(&TestChainState), + { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + let (global_tx, global_rx) = tracing_unbounded("test", 100_000); + + let until_imported = UntilGlobalMessageBlocksImported::new( + import_notifications, + TestBlockSyncRequester::default(), + block_status, + global_rx, + "global", + None, + ); + + global_tx.unbounded_send(msg).unwrap(); + + // NOTE: needs to be cloned otherwise it is moved to the stream and + // dropped too early. + let inner_chain_state = chain_state.clone(); + let work = + future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) + .then(move |res| match res { + Either::Left(_) => panic!("timeout should have fired first"), + Either::Right((_, until_imported)) => { + // timeout fired. push in the headers. + enact_dependencies(&inner_chain_state); + + until_imported + }, + }); + + futures::executor::block_on(work).0.unwrap().unwrap() + } + + #[test] + fn blocking_commit_message() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let unknown_commit = CompactCommit::
<Block> {
+ target_hash: h1.hash(),
+ target_number: 5,
+ precommits: vec![
+ Precommit { target_hash: h2.hash(), target_number: 6 },
+ Precommit { target_hash: h3.hash(), target_number: 7 },
+ ],
+ auth_data: Vec::new(), // not used
+ };
+
+ let unknown_commit =
+ || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank);
+
+ let res = blocking_message_on_dependencies(unknown_commit(), |chain_state| {
+ chain_state.import_header(h1);
+ chain_state.import_header(h2);
+ chain_state.import_header(h3);
+ });
+
+ assert_eq!(unapply_commit(res), unapply_commit(unknown_commit()));
+ }
+
+ #[test]
+ fn commit_message_all_known() {
+ let h1 = make_header(5);
+ let h2 = make_header(6);
+ let h3 = make_header(7);
+
+ let known_commit = CompactCommit::<Block>
{ + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, + ], + auth_data: Vec::new(), // not used + }; + + let known_commit = + || voter::CommunicationIn::Commit(0, known_commit.clone(), voter::Callback::Blank); + + let res = message_all_dependencies_satisfied(known_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_commit(res), unapply_commit(known_commit())); + } + + #[test] + fn blocking_catch_up_message() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: UncheckedFrom::unchecked_from([1; 32]), + signature: UncheckedFrom::unchecked_from([1; 64]), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: UncheckedFrom::unchecked_from([1; 32]), + signature: UncheckedFrom::unchecked_from([1; 64]), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; + + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + prevotes, + precommits, + base_hash: h1.hash(), + base_number: *h1.number(), + }; + + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + let res = blocking_message_on_dependencies(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up())); + } + + #[test] + fn catch_up_message_all_known() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: UncheckedFrom::unchecked_from([1; 32]), + signature: UncheckedFrom::unchecked_from([1; 64]), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: UncheckedFrom::unchecked_from([1; 32]), + signature: UncheckedFrom::unchecked_from([1; 64]), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; + + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + prevotes, + precommits, + base_hash: h1.hash(), + base_number: *h1.number(), + }; + + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + let res = message_all_dependencies_satisfied(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up())); + } + + #[test] + fn request_block_sync_for_needed_blocks() { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = 
chain_state.block_status();
+
+ let (global_tx, global_rx) = tracing_unbounded("test", 100_000);
+
+ let block_sync_requester = TestBlockSyncRequester::default();
+
+ let until_imported = UntilGlobalMessageBlocksImported::new(
+ import_notifications,
+ block_sync_requester.clone(),
+ block_status,
+ global_rx,
+ "global",
+ None,
+ );
+
+ let h1 = make_header(5);
+ let h2 = make_header(6);
+ let h3 = make_header(7);
+
+ // we create a commit message, with precommits for blocks 6 and 7 which
+ // we haven't imported.
+ let unknown_commit = CompactCommit::<Block>
{ + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, + ], + auth_data: Vec::new(), // not used + }; + + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); + + // we send the commit message and spawn the until_imported stream + global_tx.unbounded_send(unknown_commit()).unwrap(); + + let threads_pool = futures::executor::ThreadPool::new().unwrap(); + threads_pool.spawn_ok(until_imported.into_future().map(|_| ())); + + // assert that we will make sync requests + let assert = futures::future::poll_fn(|ctx| { + let block_sync_requests = block_sync_requester.requests.lock(); + + // we request blocks targeted by the precommits that aren't imported + if block_sync_requests.contains(&(h2.hash(), *h2.number())) && + block_sync_requests.contains(&(h3.hash(), *h3.number())) + { + return Poll::Ready(()) + } + + // NOTE: nothing in this function is future-aware (i.e nothing gets registered to wake + // up this future), we manually wake up this task to avoid having to wait until the + // timeout below triggers. + ctx.waker().wake_by_ref(); + + Poll::Pending + }); + + // the `until_imported` stream doesn't request the blocks immediately, + // but it should request them after a small timeout + let timeout = Delay::new(Duration::from_secs(60)); + let test = future::select(assert, timeout) + .map(|res| match res { + Either::Left(_) => {}, + Either::Right(_) => panic!("timed out waiting for block sync request"), + }) + .map(drop); + + futures::executor::block_on(test); + } + + fn test_catch_up() -> Arc>>> { + let header = make_header(5); + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + precommits: vec![], + prevotes: vec![], + base_hash: header.hash(), + base_number: *header.number(), + }; + + let catch_up = + voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + Arc::new(Mutex::new(Some(catch_up))) + } + + #[test] + fn block_global_message_wait_completed_return_when_all_awaited() { + let msg_inner = test_catch_up(); + + let waiting_block_1 = + BlockGlobalMessage:: { inner: msg_inner.clone(), target_number: 1 }; + + let waiting_block_2 = BlockGlobalMessage:: { inner: msg_inner, target_number: 2 }; + + // waiting_block_2 is still waiting for block 2, thus this should return `None`. + assert!(waiting_block_1.wait_completed(1).is_none()); + + // Message only depended on block 1 and 2. Both have been imported, thus this should yield + // the message. + assert!(waiting_block_2.wait_completed(2).is_some()); + } + + #[test] + fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + let msg_inner = test_catch_up(); + + let waiting_block_1 = + BlockGlobalMessage:: { inner: msg_inner.clone(), target_number: 1 }; + + let waiting_block_2 = BlockGlobalMessage:: { inner: msg_inner, target_number: 2 }; + + // Calling wait_completed with wrong block number should yield None. + assert!(waiting_block_1.wait_completed(1234).is_none()); + + // All blocks, that the message depended on, have been imported. Still, given the above + // block number mismatch this should return None. + assert!(waiting_block_2.wait_completed(2).is_none()); + } + + #[test] + fn metrics_cleans_up_after_itself() { + let r = Registry::new(); + + let mut m1 = Metrics::register(&r).unwrap(); + let m2 = m1.clone(); + + // Add a new message to the 'queue' of m1. 
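+ // (`waiting_messages_inc` bumps both m1's local count and the shared global gauge,
+ // which is what lets m2 observe it below)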
+ m1.waiting_messages_inc(); + + // m1 and m2 are synced through the shared atomic. + assert_eq!(1, m2.global_waiting_messages.get()); + + // Drop 'queue' m1. + drop(m1); + + // Make sure m1 cleaned up after itself, removing all messages that were left in its queue + // when dropped from the global metric. + assert_eq!(0, m2.global_waiting_messages.get()); + } +} diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs new file mode 100644 index 00000000..27a91d54 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -0,0 +1,440 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Handling custom voting rules for GRANDPA. +//! +//! This exposes the `VotingRule` trait used to implement arbitrary voting +//! restrictions that are taken into account by the GRANDPA environment when +//! selecting a finality target to vote on. + +use std::{future::Future, pin::Pin, sync::Arc}; + +use dyn_clone::DynClone; + +use sc_client_api::blockchain::HeaderBackend; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; + +/// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. +pub type VotingRuleResult = + Pin::Hash, NumberFor)>> + Send>>; + +/// A trait for custom voting rules in GRANDPA. +pub trait VotingRule: DynClone + Send + Sync +where + Block: BlockT, + B: HeaderBackend, +{ + /// Restrict the given `current_target` vote, returning the block hash and + /// number of the block to vote on, and `None` in case the vote should not + /// be restricted. `base` is the block that we're basing our votes on in + /// order to pick our target (e.g. last round estimate), and `best_target` + /// is the initial best vote target before any vote rules were applied. When + /// applying multiple `VotingRule`s both `base` and `best_target` should + /// remain unchanged. + /// + /// The contract of this interface requires that when restricting a vote, the + /// returned value **must** be an ancestor of the given `current_target`, + /// this also means that a variant must be maintained throughout the + /// execution of voting rules wherein `current_target <= best_target`. 
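+ //
+ // For instance, with `base` at #10, `best_target` at #30 and `current_target` at #25, a
+ // rule may pull the vote back to an ancestor of #25 (say #20), but must not return a
+ // block above #25 or one on a different fork. The no-op `()` implementation below never
+ // restricts the vote and simply returns `None`.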
+ fn restrict_vote( + &self, + backend: Arc, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> VotingRuleResult; +} + +impl VotingRule for () +where + Block: BlockT, + B: HeaderBackend, +{ + fn restrict_vote( + &self, + _backend: Arc, + _base: &Block::Header, + _best_target: &Block::Header, + _current_target: &Block::Header, + ) -> VotingRuleResult { + Box::pin(async { None }) + } +} + +/// A custom voting rule that guarantees that our vote is always behind the best +/// block by at least N blocks, unless the base number is < N blocks behind the +/// best, in which case it votes for the base. +/// +/// In the best case our vote is exactly N blocks +/// behind the best block, but if there is a scenario where either +/// >34% of validators run without this rule or the fork-choice rule +/// can prioritize shorter chains over longer ones, the vote may be +/// closer to the best block than N. +#[derive(Clone)] +pub struct BeforeBestBlockBy(pub N); +impl VotingRule for BeforeBestBlockBy> +where + Block: BlockT, + B: HeaderBackend, +{ + fn restrict_vote( + &self, + backend: Arc, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> VotingRuleResult { + use sp_arithmetic::traits::Saturating; + + if current_target.number().is_zero() { + return Box::pin(async { None }) + } + + // Constrain to the base number, if that's the minimal + // vote that can be placed. + if *base.number() + self.0 > *best_target.number() { + return Box::pin(std::future::ready(Some((base.hash(), *base.number())))) + } + + // find the target number restricted by this rule + let target_number = best_target.number().saturating_sub(self.0); + + // our current target is already lower than this rule would restrict + if target_number >= *current_target.number() { + return Box::pin(async { None }) + } + + let current_target = current_target.clone(); + + // find the block at the given target height + Box::pin(std::future::ready(find_target(&*backend, target_number, ¤t_target))) + } +} + +/// A custom voting rule that limits votes towards 3/4 of the unfinalized chain, +/// using the given `base` and `best_target` to figure where the 3/4 target +/// should fall. 
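+///
+/// For example, with `base` at block #100 and `best_target` at block #200 the
+/// unfinalized distance is 100 blocks, so the vote is restricted to block #175
+/// (#100 + 75), assuming `current_target` is above that height.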
+#[derive(Clone)] +pub struct ThreeQuartersOfTheUnfinalizedChain; + +impl VotingRule for ThreeQuartersOfTheUnfinalizedChain +where + Block: BlockT, + B: HeaderBackend, +{ + fn restrict_vote( + &self, + backend: Arc, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> VotingRuleResult { + // target a vote towards 3/4 of the unfinalized chain (rounding up) + let target_number = { + let two = NumberFor::::one() + One::one(); + let three = two + One::one(); + let four = three + One::one(); + + let diff = *best_target.number() - *base.number(); + let diff = ((diff * three) + two) / four; + + *base.number() + diff + }; + + // our current target is already lower than this rule would restrict + if target_number >= *current_target.number() { + return Box::pin(async { None }) + } + + // find the block at the given target height + Box::pin(std::future::ready(find_target(&*backend, target_number, current_target))) + } +} + +// walk backwards until we find the target block +fn find_target( + backend: &B, + target_number: NumberFor, + current_header: &Block::Header, +) -> Option<(Block::Hash, NumberFor)> +where + Block: BlockT, + B: HeaderBackend, +{ + let mut target_hash = current_header.hash(); + let mut target_header = current_header.clone(); + + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + return Some((target_hash, target_number)) + } + + target_hash = *target_header.parent_hash(); + target_header = backend + .header(target_hash) + .ok()? + .expect("Header known to exist due to the existence of one of its descendents; qed"); + } +} + +struct VotingRules { + rules: Arc>>>, +} + +impl Clone for VotingRules { + fn clone(&self) -> Self { + VotingRules { rules: self.rules.clone() } + } +} + +impl VotingRule for VotingRules +where + Block: BlockT, + B: HeaderBackend + 'static, +{ + fn restrict_vote( + &self, + backend: Arc, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> VotingRuleResult { + let rules = self.rules.clone(); + let base = base.clone(); + let best_target = best_target.clone(); + let current_target = current_target.clone(); + + Box::pin(async move { + let mut restricted_target = current_target.clone(); + + for rule in rules.iter() { + if let Some(header) = rule + .restrict_vote(backend.clone(), &base, &best_target, &restricted_target) + .await + .filter(|(_, restricted_number)| { + // NOTE: we can only restrict votes within the interval [base, target) + restricted_number >= base.number() && + restricted_number < restricted_target.number() + }) + .and_then(|(hash, _)| backend.header(hash).ok()) + .and_then(std::convert::identity) + { + restricted_target = header; + } + } + + let restricted_hash = restricted_target.hash(); + + if restricted_hash != current_target.hash() { + Some((restricted_hash, *restricted_target.number())) + } else { + None + } + }) + } +} + +/// A builder of a composite voting rule that applies a set of rules to +/// progressively restrict the vote. 
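+///
+/// A composite rule equivalent to what `VotingRulesBuilder::default()` builds
+/// can be assembled explicitly like this:
+///
+/// ```ignore
+/// let rule = VotingRulesBuilder::new()
+///     .add(BeforeBestBlockBy(2u32.into()))
+///     .add(ThreeQuartersOfTheUnfinalizedChain)
+///     .build();
+/// ```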
+pub struct VotingRulesBuilder { + rules: Vec>>, +} + +impl Default for VotingRulesBuilder +where + Block: BlockT, + B: HeaderBackend + 'static, +{ + fn default() -> Self { + VotingRulesBuilder::new() + .add(BeforeBestBlockBy(2u32.into())) + .add(ThreeQuartersOfTheUnfinalizedChain) + } +} + +impl VotingRulesBuilder +where + Block: BlockT, + B: HeaderBackend + 'static, +{ + /// Return a new voting rule builder using the given backend. + pub fn new() -> Self { + VotingRulesBuilder { rules: Vec::new() } + } + + /// Add a new voting rule to the builder. + pub fn add(mut self, rule: R) -> Self + where + R: VotingRule + 'static, + { + self.rules.push(Box::new(rule)); + self + } + + /// Add all given voting rules to the builder. + pub fn add_all(mut self, rules: I) -> Self + where + I: IntoIterator>>, + { + self.rules.extend(rules); + self + } + + /// Return a new `VotingRule` that applies all of the previously added + /// voting rules in-order. + pub fn build(self) -> impl VotingRule + Clone { + VotingRules { rules: Arc::new(self.rules) } + } +} + +impl VotingRule for Box> +where + Block: BlockT, + B: HeaderBackend, + Self: Clone, +{ + fn restrict_vote( + &self, + backend: Arc, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> VotingRuleResult { + (**self).restrict_vote(backend, base, best_target, current_target) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use sp_runtime::traits::Header as _; + + use substrate_test_runtime_client::{ + runtime::{Block, Header}, + Backend, Client, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// A mock voting rule that subtracts a static number of block from the `current_target`. 
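+	/// For instance, `Subtract(50)` turns a current target of #200 into a vote for #150.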
+ #[derive(Clone)] + struct Subtract(u64); + impl VotingRule> for Subtract { + fn restrict_vote( + &self, + backend: Arc>, + _base: &Header, + _best_target: &Header, + current_target: &Header, + ) -> VotingRuleResult { + let target_number = current_target.number() - self.0; + let res = backend + .hash(target_number) + .unwrap() + .map(|target_hash| (target_hash, target_number)); + + Box::pin(std::future::ready(res)) + } + } + + #[test] + fn multiple_voting_rules_cannot_restrict_past_base() { + // setup an aggregate voting rule composed of two voting rules + // where each subtracts 50 blocks from the current target + let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + let mut hashes = Vec::with_capacity(200); + + for _ in 0..200 { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + hashes.push(block.hash()); + + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + let genesis = client.header(client.info().genesis_hash).unwrap().unwrap(); + + let best = client.header(client.info().best_hash).unwrap().unwrap(); + + let (_, number) = + futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best)) + .unwrap(); + + // we apply both rules which should subtract 100 blocks from best block (#200) + // which means that we should be voting for block #100 + assert_eq!(number, 100); + + let block110 = client.header(hashes[109]).unwrap().unwrap(); + + let (_, number) = futures::executor::block_on(rule.restrict_vote( + client.clone(), + &block110, + &best, + &best, + )) + .unwrap(); + + // base block is #110 while best block is #200, applying both rules would make + // would make the target block (#100) be lower than the base block, therefore + // only one of the rules is applied. + assert_eq!(number, 150); + } + + #[test] + fn before_best_by_has_cutoff_at_base() { + let rule = BeforeBestBlockBy(2); + + let mut client = Arc::new(TestClientBuilder::new().build()); + + let n = 5; + let mut hashes = Vec::with_capacity(n); + for _ in 0..n { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + hashes.push(block.hash()); + + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + let best = client.header(client.info().best_hash).unwrap().unwrap(); + let best_number = *best.number(); + + for i in 0..n { + let base = client.header(hashes[i]).unwrap().unwrap(); + let (_, number) = futures::executor::block_on(rule.restrict_vote( + client.clone(), + &base, + &best, + &best, + )) + .unwrap(); + + let expected = std::cmp::max(best_number - 2, *base.number()); + assert_eq!(number, expected, "best = {}, lag = 2, base = {}", best_number, i); + } + } +} diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs new file mode 100644 index 00000000..ec2d25c3 --- /dev/null +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -0,0 +1,444 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Utilities for generating and verifying GRANDPA warp sync proofs. + +use sp_runtime::codec::{self, Decode, Encode}; + +use crate::{ + best_justification, find_scheduled_change, AuthoritySetChanges, AuthoritySetHardFork, + BlockNumberOps, GrandpaJustification, SharedAuthoritySet, +}; +use sc_client_api::Backend as ClientBackend; +use sc_network_common::sync::warp::{EncodedProof, VerificationResult, WarpSyncProvider}; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_consensus_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, +}; + +use std::{collections::HashMap, sync::Arc}; + +/// Warp proof processing error. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Decoding error. + #[error("Failed to decode block hash: {0}.")] + DecodeScale(#[from] codec::Error), + /// Client backend error. + #[error("{0}")] + Client(#[from] sp_blockchain::Error), + /// Invalid request data. + #[error("{0}")] + InvalidRequest(String), + /// Invalid warp proof. + #[error("{0}")] + InvalidProof(String), + /// Missing header or authority set change data. + #[error("Missing required data to be able to answer request.")] + MissingData, +} + +/// The maximum size in bytes of the `WarpSyncProof`. +pub(super) const MAX_WARP_SYNC_PROOF_SIZE: usize = 8 * 1024 * 1024; + +/// A proof of an authority set change. +#[derive(Decode, Encode, Debug)] +pub struct WarpSyncFragment { + /// The last block that the given authority set finalized. This block should contain a digest + /// signaling an authority set change from which we can fetch the next authority set. + pub header: Block::Header, + /// A justification for the header above which proves its finality. In order to validate it the + /// verifier must be aware of the authorities and set id for which the justification refers to. + pub justification: GrandpaJustification, +} + +/// An accumulated proof of multiple authority set changes. +#[derive(Decode, Encode)] +pub struct WarpSyncProof { + proofs: Vec>, + is_finished: bool, +} + +impl WarpSyncProof { + /// Generates a warp sync proof starting at the given block. It will generate authority set + /// change proofs for all changes that happened from `begin` until the current authority set + /// (capped by MAX_WARP_SYNC_PROOF_SIZE). + fn generate( + backend: &Backend, + begin: Block::Hash, + set_changes: &AuthoritySetChanges>, + ) -> Result, Error> + where + Backend: ClientBackend, + { + // TODO: cache best response (i.e. the one with lowest begin_number) + let blockchain = backend.blockchain(); + + let begin_number = blockchain + .block_number_from_id(&BlockId::Hash(begin))? 
+ .ok_or_else(|| Error::InvalidRequest("Missing start block".to_string()))?; + + if begin_number > blockchain.info().finalized_number { + return Err(Error::InvalidRequest("Start block is not finalized".to_string())) + } + + let canon_hash = blockchain.hash(begin_number)?.expect( + "begin number is lower than finalized number; \ + all blocks below finalized number must have been imported; \ + qed.", + ); + + if canon_hash != begin { + return Err(Error::InvalidRequest( + "Start block is not in the finalized chain".to_string(), + )) + } + + let mut proofs = Vec::new(); + let mut proofs_encoded_len = 0; + let mut proof_limit_reached = false; + + let set_changes = set_changes.iter_from(begin_number).ok_or(Error::MissingData)?; + + for (_, last_block) in set_changes { + let hash = blockchain.block_hash_from_id(&BlockId::Number(*last_block))? + .expect("header number comes from previously applied set changes; corresponding hash must exist in db; qed."); + + let header = blockchain + .header(hash)? + .expect("header hash obtained from header number exists in db; corresponding header must exist in db too; qed."); + + // the last block in a set is the one that triggers a change to the next set, + // therefore the block must have a digest that signals the authority set change + if find_scheduled_change::(&header).is_none() { + // if it doesn't contain a signal for standard change then the set must have changed + // through a forced changed, in which case we stop collecting proofs as the chain of + // trust in authority handoffs was broken. + break + } + + let justification = blockchain + .justifications(header.hash())? + .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) + .ok_or_else(|| Error::MissingData)?; + + let justification = GrandpaJustification::::decode(&mut &justification[..])?; + + let proof = WarpSyncFragment { header: header.clone(), justification }; + let proof_size = proof.encoded_size(); + + // Check for the limit. We remove some bytes from the maximum size, because we're only + // counting the size of the `WarpSyncFragment`s. The extra margin is here to leave + // room for rest of the data (the size of the `Vec` and the boolean). + if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { + proof_limit_reached = true; + break + } + + proofs_encoded_len += proof_size; + proofs.push(proof); + } + + let is_finished = if proof_limit_reached { + false + } else { + let latest_justification = best_justification(backend)?.filter(|justification| { + // the existing best justification must be for a block higher than the + // last authority set change. if we didn't prove any authority set + // change then we fallback to make sure it's higher or equal to the + // initial warp sync block. + let limit = proofs + .last() + .map(|proof| proof.justification.target().0 + One::one()) + .unwrap_or(begin_number); + + justification.target().0 >= limit + }); + + if let Some(latest_justification) = latest_justification { + let header = blockchain.header(latest_justification.target().1)? + .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); + + proofs.push(WarpSyncFragment { header, justification: latest_justification }) + } + + true + }; + + let final_outcome = WarpSyncProof { proofs, is_finished }; + debug_assert!(final_outcome.encoded_size() <= MAX_WARP_SYNC_PROOF_SIZE); + Ok(final_outcome) + } + + /// Verifies the warp sync proof starting at the given set id and with the given authorities. 
+ /// Verification stops when either the proof is exhausted or finality for the target header can + /// be proven. If the proof is valid the new set id and authorities is returned. + fn verify( + &self, + set_id: SetId, + authorities: AuthorityList, + hard_forks: &HashMap<(Block::Hash, NumberFor), (SetId, AuthorityList)>, + ) -> Result<(SetId, AuthorityList), Error> + where + NumberFor: BlockNumberOps, + { + let mut current_set_id = set_id; + let mut current_authorities = authorities; + + for (fragment_num, proof) in self.proofs.iter().enumerate() { + let hash = proof.header.hash(); + let number = *proof.header.number(); + + if let Some((set_id, list)) = hard_forks.get(&(hash, number)) { + current_set_id = *set_id; + current_authorities = list.clone(); + } else { + proof + .justification + .verify(current_set_id, ¤t_authorities) + .map_err(|err| Error::InvalidProof(err.to_string()))?; + + if proof.justification.target().1 != hash { + return Err(Error::InvalidProof( + "Mismatch between header and justification".to_owned(), + )) + } + + if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { + current_authorities = scheduled_change.next_authorities; + current_set_id += 1; + } else if fragment_num != self.proofs.len() - 1 || !self.is_finished { + // Only the last fragment of the last proof message is allowed to be missing the + // authority set change. + return Err(Error::InvalidProof( + "Header is missing authority set change digest".to_string(), + )) + } + } + } + Ok((current_set_id, current_authorities)) + } +} + +/// Implements network API for warp sync. +pub struct NetworkProvider> +where + NumberFor: BlockNumberOps, +{ + backend: Arc, + authority_set: SharedAuthoritySet>, + hard_forks: HashMap<(Block::Hash, NumberFor), (SetId, AuthorityList)>, +} + +impl> NetworkProvider +where + NumberFor: BlockNumberOps, +{ + /// Create a new istance for a given backend and authority set. 
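+	///
+	/// Sketch of typical wiring (`backend` and `shared_authority_set` are
+	/// placeholders; the shared authority set is usually the one exposed by the
+	/// GRANDPA `LinkHalf`, and no authority-set hard forks are registered here):
+	///
+	/// ```ignore
+	/// let warp_sync = NetworkProvider::new(backend.clone(), shared_authority_set, Vec::new());
+	/// ```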
+ pub fn new( + backend: Arc, + authority_set: SharedAuthoritySet>, + hard_forks: Vec>, + ) -> Self { + NetworkProvider { + backend, + authority_set, + hard_forks: hard_forks + .into_iter() + .map(|fork| (fork.block, (fork.set_id, fork.authorities))) + .collect(), + } + } +} + +impl> WarpSyncProvider + for NetworkProvider +where + NumberFor: BlockNumberOps, +{ + fn generate( + &self, + start: Block::Hash, + ) -> Result> { + let proof = WarpSyncProof::::generate( + &*self.backend, + start, + &self.authority_set.authority_set_changes(), + ) + .map_err(Box::new)?; + Ok(EncodedProof(proof.encode())) + } + + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result, Box> { + let EncodedProof(proof) = proof; + let proof = WarpSyncProof::::decode(&mut proof.as_slice()) + .map_err(|e| format!("Proof decoding error: {:?}", e))?; + let last_header = proof + .proofs + .last() + .map(|p| p.header.clone()) + .ok_or_else(|| "Empty proof".to_string())?; + let (next_set_id, next_authorities) = + proof.verify(set_id, authorities, &self.hard_forks).map_err(Box::new)?; + if proof.is_finished { + Ok(VerificationResult::::Complete(next_set_id, next_authorities, last_header)) + } else { + Ok(VerificationResult::::Partial( + next_set_id, + next_authorities, + last_header.hash(), + )) + } + } + + fn current_authorities(&self) -> AuthorityList { + self.authority_set.inner().current_authorities.clone() + } +} + +#[cfg(test)] +mod tests { + use super::{codec::Encode, WarpSyncProof}; + use crate::{AuthoritySetChanges, GrandpaJustification}; + use rand::prelude::*; + use sc_block_builder::BlockBuilderProvider; + use sp_blockchain::HeaderBackend; + use sp_consensus::BlockOrigin; + use sp_consensus_grandpa::GRANDPA_ENGINE_ID; + use sp_keyring::Ed25519Keyring; + use std::sync::Arc; + use substrate_test_runtime_client::{ + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClientBuilder, TestClientBuilderExt, + }; + + #[test] + fn warp_sync_proof_generate_verify() { + let mut rng = rand::rngs::StdRng::from_seed([0; 32]); + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let available_authorities = Ed25519Keyring::iter().collect::>(); + let genesis_authorities = vec![(Ed25519Keyring::Alice.public().into(), 1)]; + + let mut current_authorities = vec![Ed25519Keyring::Alice]; + let mut current_set_id = 0; + let mut authority_set_changes = Vec::new(); + + for n in 1..=100 { + let mut builder = client.new_block(Default::default()).unwrap(); + let mut new_authorities = None; + + // we will trigger an authority set change every 10 blocks + if n != 0 && n % 10 == 0 { + // pick next authorities and add digest for the set change + let n_authorities = rng.gen_range(1..available_authorities.len()); + let next_authorities = available_authorities + .choose_multiple(&mut rng, n_authorities) + .cloned() + .collect::>(); + + new_authorities = Some(next_authorities.clone()); + + let next_authorities = next_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + let digest = sp_runtime::generic::DigestItem::Consensus( + sp_consensus_grandpa::GRANDPA_ENGINE_ID, + sp_consensus_grandpa::ConsensusLog::ScheduledChange( + sp_consensus_grandpa::ScheduledChange { delay: 0u64, next_authorities }, + ) + .encode(), + ); + + builder.push_deposit_log_digest_item(digest).unwrap(); + } + + let block = builder.build().unwrap().block; + + 
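+			// import the block that was just built so it extends the canonical chain
+			// before a justification is (possibly) generated for it below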
futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + if let Some(new_authorities) = new_authorities { + // generate a justification for this block, finalize it and note the authority set + // change + let (target_hash, target_number) = { + let info = client.info(); + (info.best_hash, info.best_number) + }; + + let mut precommits = Vec::new(); + for keyring in ¤t_authorities { + let precommit = finality_grandpa::Precommit { target_hash, target_number }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_consensus_grandpa::localized_payload(42, current_set_id, &msg); + let signature = keyring.sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: keyring.public().into(), + }; + + precommits.push(precommit); + } + + let commit = finality_grandpa::Commit { target_hash, target_number, precommits }; + + let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); + + client + .finalize_block(target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) + .unwrap(); + + authority_set_changes.push((current_set_id, n)); + + current_set_id += 1; + current_authorities = new_authorities; + } + } + + let authority_set_changes = AuthoritySetChanges::from(authority_set_changes); + + // generate a warp sync proof + let genesis_hash = client.hash(0).unwrap().unwrap(); + + let warp_sync_proof = + WarpSyncProof::generate(&*backend, genesis_hash, &authority_set_changes).unwrap(); + + // verifying the proof should yield the last set id and authorities + let (new_set_id, new_authorities) = + warp_sync_proof.verify(0, genesis_authorities, &Default::default()).unwrap(); + + let expected_authorities = current_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + assert_eq!(new_set_id, current_set_id); + assert_eq!(new_authorities, expected_authorities); + } +}
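For context on how the pieces above fit together: the `VotingRule` trait is the extension point node operators use to shape GRANDPA votes, and custom rules compose with the built-in ones through `VotingRulesBuilder`. A minimal sketch of such a rule follows; the `FixedLag` name, the lag value, and the crate-root re-exports used in the imports are assumptions made for illustration and are not part of this patch.

```rust
// Illustrative sketch only: a voting rule that lags `current_target` by a fixed
// number of blocks, clamped at `base` so composed rules never vote below the
// last round estimate (same clamping idea as `BeforeBestBlockBy`).
use std::sync::Arc;

use sc_client_api::blockchain::HeaderBackend;
use sc_consensus_grandpa::{VotingRule, VotingRuleResult};
use sp_arithmetic::traits::Saturating;
use sp_runtime::traits::{Block as BlockT, Header, NumberFor};

#[derive(Clone)]
pub struct FixedLag<N>(pub N);

impl<Block, B> VotingRule<Block, B> for FixedLag<NumberFor<Block>>
where
	Block: BlockT,
	B: HeaderBackend<Block>,
{
	fn restrict_vote(
		&self,
		backend: Arc<B>,
		base: &Block::Header,
		_best_target: &Block::Header,
		current_target: &Block::Header,
	) -> VotingRuleResult<Block> {
		// Never restrict below the base block.
		let target_number =
			std::cmp::max(*base.number(), current_target.number().saturating_sub(self.0));

		// Nothing to do if the current target is already at or below the lagged height.
		if target_number >= *current_target.number() {
			return Box::pin(async { None })
		}

		// Walk back from `current_target` to the block at `target_number`,
		// mirroring the `find_target` helper in `voting_rule.rs`.
		let mut header = current_target.clone();
		while *header.number() > target_number {
			header = match backend.header(*header.parent_hash()) {
				Ok(Some(parent)) => parent,
				// Bail out (no restriction) if an ancestor is missing.
				_ => return Box::pin(async { None }),
			};
		}

		let restricted = Some((header.hash(), *header.number()));
		Box::pin(async move { restricted })
	}
}
```

Registered through the builder, e.g. `VotingRulesBuilder::default().add(FixedLag(8u32.into())).build()`, such a rule is still subject to the `[base, target)` clamping applied by the composite `VotingRules::restrict_vote` shown above.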