diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index d1f3a201c80a..356abaa93cdd 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -131,6 +131,14 @@ zombienet-polkadot-functional-0008-dispute-old-finalized: --local-dir="${LOCAL_DIR}/functional" --test="0008-dispute-old-finalized.zndsl" +zombienet-polkadot-functional-0009-approval-voting-coalescing: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0009-approval-voting-coalescing.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index d9e4155d9c5e..ab56b62c4ca5 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -24,7 +24,7 @@ use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, - vstaging::NodeFeatures, + vstaging::{ApprovalVotingParams, NodeFeatures}, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; @@ -427,6 +427,18 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + session_index: polkadot_primitives::SessionIndex, + ) -> Result { + Ok(self + .rpc_client + .parachain_host_staging_approval_voting_params(at, session_index) + .await?) + } + async fn node_features(&self, at: Hash) -> Result { Ok(self.rpc_client.parachain_host_node_features(at).await?) 
} diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 8e0d5fae6777..c64fff77a29f 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -32,7 +32,7 @@ use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, slashing, - vstaging::NodeFeatures, + vstaging::{ApprovalVotingParams, NodeFeatures}, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, @@ -625,6 +625,19 @@ impl RelayChainRpcClient { } #[allow(missing_docs)] + pub async fn parachain_host_staging_approval_voting_params( + &self, + at: RelayHash, + _session_index: SessionIndex, + ) -> Result { + self.call_remote_runtime_function( + "ParachainHost_staging_approval_voting_params", + at, + None::<()>, + ) + .await + } + pub async fn parachain_host_para_backing_state( &self, at: RelayHash, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index 7ace96147106..0791f63235fb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Rococo declaration decl_test_relay_chains! { - #[api_version(9)] + #[api_version(10)] pub struct Rococo { genesis = genesis::genesis(), on_init = (), diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index 2ba47250d564..8a5d4bbf8085 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Westend declaration decl_test_relay_chains! { - #[api_version(9)] + #[api_version(10)] pub struct Westend { genesis = genesis::genesis(), on_init = (), diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 6896d3c4b224..d8c6cc51bea4 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -64,7 +64,6 @@ jemalloc-allocator = [ "polkadot-node-core-pvf/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator", ] -network-protocol-staging = ["polkadot-cli/network-protocol-staging"] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 77f73ead2ed4..95b63913ccbc 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -75,5 +75,3 @@ runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] - -network-protocol-staging = ["service/network-protocol-staging"] diff --git a/polkadot/node/core/approval-voting/src/approval_checking.rs b/polkadot/node/core/approval-voting/src/approval_checking.rs index 5d24ff164193..0aa6102fbd6d 100644 --- a/polkadot/node/core/approval-voting/src/approval_checking.rs +++ b/polkadot/node/core/approval-voting/src/approval_checking.rs @@ -25,6 +25,15 @@ use crate::{ time::Tick, }; +/// Result of counting the necessary tranches needed for approving a block. 
+#[derive(Debug, PartialEq, Clone)] +pub struct TranchesToApproveResult { + /// The required tranches for approving this block + pub required_tranches: RequiredTranches, + /// The total number of no_shows at the moment we are doing the counting. + pub total_observed_no_shows: usize, +} + /// The required tranches of assignments needed to determine whether a candidate is approved. #[derive(Debug, PartialEq, Clone)] pub enum RequiredTranches { @@ -64,7 +73,7 @@ pub enum RequiredTranches { } /// The result of a check. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum Check { /// The candidate is unapproved. Unapproved, @@ -178,6 +187,7 @@ struct State { next_no_show: Option, /// The last tick at which a considered assignment was received. last_assignment_tick: Option, + total_observed_no_shows: usize, } impl State { @@ -187,41 +197,53 @@ impl State { needed_approvals: usize, n_validators: usize, no_show_duration: Tick, - ) -> RequiredTranches { + ) -> TranchesToApproveResult { let covering = if self.depth == 0 { 0 } else { self.covering }; if self.depth != 0 && self.assignments + covering + self.uncovered >= n_validators { - return RequiredTranches::All + return TranchesToApproveResult { + required_tranches: RequiredTranches::All, + total_observed_no_shows: self.total_observed_no_shows, + } } // If we have enough assignments and all no-shows are covered, we have reached the number // of tranches that we need to have. if self.assignments >= needed_approvals && (covering + self.uncovered) == 0 { - return RequiredTranches::Exact { - needed: tranche, - tolerated_missing: self.covered, - next_no_show: self.next_no_show, - last_assignment_tick: self.last_assignment_tick, + return TranchesToApproveResult { + required_tranches: RequiredTranches::Exact { + needed: tranche, + tolerated_missing: self.covered, + next_no_show: self.next_no_show, + last_assignment_tick: self.last_assignment_tick, + }, + total_observed_no_shows: self.total_observed_no_shows, } } // We're pending more assignments and should look at more tranches. let clock_drift = self.clock_drift(no_show_duration); if self.depth == 0 { - RequiredTranches::Pending { - considered: tranche, - next_no_show: self.next_no_show, - // during the initial assignment-gathering phase, we want to accept assignments - // from any tranche. Note that honest validators will still not broadcast their - // assignment until it is time to do so, regardless of this value. - maximum_broadcast: DelayTranche::max_value(), - clock_drift, + TranchesToApproveResult { + required_tranches: RequiredTranches::Pending { + considered: tranche, + next_no_show: self.next_no_show, + // during the initial assignment-gathering phase, we want to accept assignments + // from any tranche. Note that honest validators will still not broadcast their + // assignment until it is time to do so, regardless of this value. 
+ maximum_broadcast: DelayTranche::max_value(), + clock_drift, + }, + total_observed_no_shows: self.total_observed_no_shows, } } else { - RequiredTranches::Pending { - considered: tranche, - next_no_show: self.next_no_show, - maximum_broadcast: tranche + (covering + self.uncovered) as DelayTranche, - clock_drift, + TranchesToApproveResult { + required_tranches: RequiredTranches::Pending { + considered: tranche, + next_no_show: self.next_no_show, + maximum_broadcast: tranche + (covering + self.uncovered) as DelayTranche, + clock_drift, + }, + total_observed_no_shows: self.total_observed_no_shows, } } } @@ -276,6 +298,7 @@ impl State { uncovered, next_no_show, last_assignment_tick, + total_observed_no_shows: self.total_observed_no_shows + new_no_shows, } } } @@ -372,7 +395,7 @@ pub fn tranches_to_approve( block_tick: Tick, no_show_duration: Tick, needed_approvals: usize, -) -> RequiredTranches { +) -> TranchesToApproveResult { let tick_now = tranche_now as Tick + block_tick; let n_validators = approval_entry.n_validators(); @@ -384,6 +407,7 @@ pub fn tranches_to_approve( uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; // The `ApprovalEntry` doesn't have any data for empty tranches. We still want to iterate over @@ -434,7 +458,7 @@ pub fn tranches_to_approve( let s = s.advance(n_assignments, no_shows, next_no_show, last_assignment_tick); let output = s.output(tranche, needed_approvals, n_validators, no_show_duration); - *state = match output { + *state = match output.required_tranches { RequiredTranches::Exact { .. } | RequiredTranches::All => { // Wipe the state clean so the next iteration of this closure will terminate // the iterator. This guarantees that we can call `last` further down to see @@ -464,15 +488,17 @@ mod tests { #[test] fn pending_is_not_approved() { - let candidate = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: BitVec::default(), - } - .into(); + let candidate = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: BitVec::default(), + }, + 0, + ); - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: BitVec::default(), our_assignment: None, @@ -497,29 +523,31 @@ mod tests { #[test] fn exact_takes_only_assignments_up_to() { - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 10], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 10], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 0, assignments: (0..2).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 
1.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 2, assignments: (5..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -569,29 +597,31 @@ mod tests { #[test] fn one_honest_node_always_approves() { - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 10], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 10], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 0, assignments: (0..4).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (4..6).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, - approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 2, assignments: (6..10).map(|i| (ValidatorIndex(i), 0.into())).collect(), }, @@ -647,7 +677,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 5], our_assignment: None, @@ -675,7 +705,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 1, tolerated_missing: 0, @@ -691,7 +722,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -715,7 +746,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: Some(block_tick + no_show_duration), @@ -731,7 +763,7 @@ mod tests { let no_show_duration = 10; let needed_approvals = 4; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; 10], our_assignment: None, @@ -759,7 +791,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 11, next_no_show: None, @@ -776,7 +809,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -807,7 +840,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 1, next_no_show: None, @@ -826,7 +860,8 @@ mod tests { block_tick, no_show_duration, 
needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 1, next_no_show: None, @@ -843,7 +878,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -879,7 +914,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 1, tolerated_missing: 0, @@ -898,7 +934,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 2, tolerated_missing: 1, @@ -917,7 +954,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: None, @@ -934,7 +972,7 @@ mod tests { let needed_approvals = 4; let n_validators = 8; - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), assigned_validators: bitvec![u8, BitOrderLsb0; 0; n_validators], our_assignment: None, @@ -970,7 +1008,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 2, tolerated_missing: 1, @@ -992,7 +1031,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 2, next_no_show: None, @@ -1013,7 +1053,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Exact { needed: 3, tolerated_missing: 2, @@ -1029,22 +1070,24 @@ mod tests { let no_show_duration = 10; let needed_approvals = 3; - let mut candidate: CandidateEntry = approval_db::v1::CandidateEntry { - candidate: dummy_candidate_receipt(dummy_hash()), - session: 0, - block_assignments: BTreeMap::default(), - approvals: bitvec![u8, BitOrderLsb0; 0; 3], - } - .into(); + let mut candidate: CandidateEntry = CandidateEntry::from_v1( + approval_db::v1::CandidateEntry { + candidate: dummy_candidate_receipt(dummy_hash()), + session: 0, + block_assignments: BTreeMap::default(), + approvals: bitvec![u8, BitOrderLsb0; 0; 3], + }, + 0, + ); for i in 0..3 { candidate.mark_approval(ValidatorIndex(i)); } - let approval_entry = approval_db::v2::ApprovalEntry { + let approval_entry = approval_db::v3::ApprovalEntry { tranches: vec![ // Assignments with invalid validator indexes. 
- approval_db::v2::TrancheEntry { + approval_db::v3::TrancheEntry { tranche: 1, assignments: (2..5).map(|i| (ValidatorIndex(i), 1.into())).collect(), }, @@ -1068,7 +1111,8 @@ mod tests { block_tick, no_show_duration, needed_approvals, - ), + ) + .required_tranches, RequiredTranches::Pending { considered: 10, next_no_show: None, @@ -1094,7 +1138,7 @@ mod tests { ]; for test_tranche in test_tranches { - let mut approval_entry: ApprovalEntry = approval_db::v2::ApprovalEntry { + let mut approval_entry: ApprovalEntry = approval_db::v3::ApprovalEntry { tranches: Vec::new(), backing_group: GroupIndex(0), our_assignment: None, @@ -1345,10 +1389,11 @@ mod tests { uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; assert_eq!( - state.output(0, 10, 10, 20), + state.output(0, 10, 10, 20).required_tranches, RequiredTranches::Pending { considered: 0, next_no_show: None, @@ -1368,10 +1413,11 @@ mod tests { uncovered: 0, next_no_show: None, last_assignment_tick: None, + total_observed_no_shows: 0, }; assert_eq!( - state.output(0, 10, 10, 20), + state.output(0, 10, 10, 20).required_tranches, RequiredTranches::Exact { needed: 0, tolerated_missing: 0, diff --git a/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs new file mode 100644 index 000000000000..e21c53e4cac8 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/common/migration_helpers.rs @@ -0,0 +1,39 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; + +use polkadot_node_primitives::approval::{ + v1::{AssignmentCert, AssignmentCertKind, VrfProof, VrfSignature, RELAY_VRF_MODULO_CONTEXT}, + v2::VrfPreOutput, +}; +pub fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![u8, BitOrderLsb0; 0; len] +} + +pub fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { + let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); + let msg = b"test-garbage"; + let mut prng = rand_core::OsRng; + let keypair = schnorrkel::Keypair::generate_with(&mut prng); + let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); + let out = inout.to_output(); + + AssignmentCert { + kind, + vrf: VrfSignature { pre_output: VrfPreOutput(out), proof: VrfProof(proof) }, + } +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs new file mode 100644 index 000000000000..249dcf912df5 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/common/mod.rs @@ -0,0 +1,293 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Common helper functions for all versions of approval-voting database. +use std::sync::Arc; + +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash}; + +use crate::{ + backend::{Backend, BackendWriteOp, V1ReadBackend, V2ReadBackend}, + persisted_entries, +}; + +use super::{ + v2::{load_block_entry_v1, load_candidate_entry_v1}, + v3::{load_block_entry_v2, load_candidate_entry_v2, BlockEntry, CandidateEntry}, +}; + +pub mod migration_helpers; + +const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; + +/// A range from earliest..last block number stored within the DB. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); +/// The database config. +#[derive(Debug, Clone, Copy)] +pub struct Config { + /// The column family in the database where data is stored. + pub col_approval_data: u32, +} + +/// `DbBackend` is a concrete implementation of the higher-level Backend trait +pub struct DbBackend { + inner: Arc, + config: Config, +} + +impl DbBackend { + /// Create a new [`DbBackend`] with the supplied key-value store and + /// config. + pub fn new(db: Arc, config: Config) -> Self { + DbBackend { inner: db, config } + } +} + +/// Errors while accessing things from the DB. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum Error { + Io(std::io::Error), + InvalidDecoding(parity_scale_codec::Error), + InternalError(SubsystemError), +} + +impl std::error::Error for Error {} + +/// Result alias for DB errors. +pub type Result = std::result::Result; + +impl Backend for DbBackend { + fn load_block_entry( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } + + fn load_candidate_entry( + &self, + candidate_hash: &CandidateHash, + ) -> SubsystemResult> { + load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) + } + + fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { + load_blocks_at_height(&*self.inner, &self.config, block_height) + } + + fn load_all_blocks(&self) -> SubsystemResult> { + load_all_blocks(&*self.inner, &self.config) + } + + fn load_stored_blocks(&self) -> SubsystemResult> { + load_stored_blocks(&*self.inner, &self.config) + } + + /// Atomically write the list of operations, with later operations taking precedence over prior. 
+ fn write(&mut self, ops: I) -> SubsystemResult<()> + where + I: IntoIterator, + { + let mut tx = DBTransaction::new(); + for op in ops { + match op { + BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { + tx.put_vec( + self.config.col_approval_data, + &STORED_BLOCKS_KEY, + stored_block_range.encode(), + ); + }, + BackendWriteOp::DeleteStoredBlockRange => { + tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); + }, + BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { + tx.put_vec( + self.config.col_approval_data, + &blocks_at_height_key(h), + blocks.encode(), + ); + }, + BackendWriteOp::DeleteBlocksAtHeight(h) => { + tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); + }, + BackendWriteOp::WriteBlockEntry(block_entry) => { + let block_entry: BlockEntry = block_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + }, + BackendWriteOp::DeleteBlockEntry(hash) => { + tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); + }, + BackendWriteOp::WriteCandidateEntry(candidate_entry) => { + let candidate_entry: CandidateEntry = candidate_entry.into(); + tx.put_vec( + self.config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + }, + BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { + tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); + }, + } + } + + self.inner.write(tx).map_err(|e| e.into()) + } +} + +impl V1ReadBackend for DbBackend { + fn load_candidate_entry_v1( + &self, + candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, + ) -> SubsystemResult> { + load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(|e| persisted_entries::CandidateEntry::from_v1(e, candidate_index))) + } + + fn load_block_entry_v1( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } +} + +impl V2ReadBackend for DbBackend { + fn load_candidate_entry_v2( + &self, + candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, + ) -> SubsystemResult> { + load_candidate_entry_v2(&*self.inner, &self.config, candidate_hash) + .map(|e| e.map(|e| persisted_entries::CandidateEntry::from_v2(e, candidate_index))) + } + + fn load_block_entry_v2( + &self, + block_hash: &Hash, + ) -> SubsystemResult> { + load_block_entry_v2(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) + } +} + +pub(crate) fn load_decode( + store: &dyn Database, + col_approval_data: u32, + key: &[u8], +) -> Result> { + match store.get(col_approval_data, key)? { + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), + } +} + +/// The key a given block entry is stored under. +pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { + const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(block_hash.as_ref()); + + key +} + +/// The key a given candidate entry is stored under. 
+pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { + const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); + + key +} + +/// The key a set of block hashes corresponding to a block number is stored under. +pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { + const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; + + let mut key = [0u8; 12 + 4]; + key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); + block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); + + key +} + +/// Return all blocks which have entries in the DB, ascending, by height. +pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { + let mut hashes = Vec::new(); + if let Some(stored_blocks) = load_stored_blocks(store, config)? { + for height in stored_blocks.0..stored_blocks.1 { + let blocks = load_blocks_at_height(store, config, &height)?; + hashes.extend(blocks); + } + } + + Ok(hashes) +} + +/// Load the stored-blocks key from the state. +pub fn load_stored_blocks( + store: &dyn Database, + config: &Config, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a blocks-at-height entry for a given block number. +pub fn load_blocks_at_height( + store: &dyn Database, + config: &Config, + block_number: &BlockNumber, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) + .map(|x| x.unwrap_or_default()) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store. +pub fn load_block_entry( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a candidate entry from the aux store in current version format. +pub fn load_candidate_entry( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/mod.rs index 20fb6aa82d8d..78942a507f4b 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/mod.rs @@ -30,5 +30,7 @@ //! In the future, we may use a temporary DB which doesn't need to be wiped, but for the //! time being we share the same DB with the rest of Substrate. 
+pub mod common; pub mod v1; pub mod v2; +pub mod v3; diff --git a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs index 07d8242b772e..b979cb7ef45f 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v1/tests.rs @@ -40,10 +40,6 @@ fn make_db() -> (DbBackend, Arc) { (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - fn make_block_entry( block_hash: Hash, parent_hash: Hash, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs index efdba41b57a4..df6e4754dbd6 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/migration_helpers.rs @@ -16,29 +16,19 @@ //! Approval DB migration helpers. use super::*; -use crate::backend::Backend; -use polkadot_node_primitives::approval::v1::{ - AssignmentCert, AssignmentCertKind, VrfPreOutput, VrfProof, VrfSignature, - RELAY_VRF_MODULO_CONTEXT, +use crate::{ + approval_db::common::{ + migration_helpers::{dummy_assignment_cert, make_bitvec}, + Error, Result, StoredBlockRange, + }, + backend::Backend, }; + +use polkadot_node_primitives::approval::v1::AssignmentCertKind; use polkadot_node_subsystem_util::database::Database; use sp_application_crypto::sp_core::H256; use std::{collections::HashSet, sync::Arc}; -fn dummy_assignment_cert(kind: AssignmentCertKind) -> AssignmentCert { - let ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT); - let msg = b"test-garbage"; - let mut prng = rand_core::OsRng; - let keypair = schnorrkel::Keypair::generate_with(&mut prng); - let (inout, proof, _) = keypair.vrf_sign(ctx.bytes(msg)); - let preout = inout.to_output(); - - AssignmentCert { - kind, - vrf: VrfSignature { pre_output: VrfPreOutput(preout), proof: VrfProof(proof) }, - } -} - fn make_block_entry_v1( block_hash: Hash, parent_hash: Hash, @@ -58,14 +48,10 @@ fn make_block_entry_v1( } } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - /// Migrates `OurAssignment`, `CandidateEntry` and `ApprovalEntry` to version 2. /// Returns on any error. /// Must only be used in parachains DB migration code - `polkadot-service` crate. -pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { +pub fn v1_to_latest(db: Arc, config: Config) -> Result<()> { let mut backend = crate::DbBackend::new(db, config); let all_blocks = backend .load_all_blocks() @@ -89,11 +75,13 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { let mut counter = 0; // Get all candidate entries, approval entries and convert each of them. for block in all_blocks { - for (_core_index, candidate_hash) in block.candidates() { + for (candidate_index, (_core_index, candidate_hash)) in + block.candidates().iter().enumerate() + { // Loading the candidate will also perform the conversion to the updated format and // return that represantation. if let Some(candidate_entry) = backend - .load_candidate_entry_v1(&candidate_hash) + .load_candidate_entry_v1(&candidate_hash, candidate_index as CandidateIndex) .map_err(|e| Error::InternalError(e))? { // Write the updated representation. 
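// Editorial sketch, not part of this diff: how the renamed migration entry points might be
// dispatched by a database upgrade step. Only `v1_to_latest`, `v2_to_latest`, `Config`,
// `Result` and the `Database` trait come from this change set; the function name
// `upgrade_approval_db` and the `on_disk_version` argument are illustrative assumptions
// (the real wiring lives in the parachains DB upgrade code and is not shown here).
use std::sync::Arc;

use polkadot_node_subsystem_util::database::Database;

use crate::approval_db::{
    common::{Config, Result},
    v2::migration_helpers::v1_to_latest,
    v3::migration_helpers::v2_to_latest,
};

fn upgrade_approval_db(
    db: Arc<dyn Database>,
    config: Config,
    on_disk_version: u32,
) -> Result<()> {
    match on_disk_version {
        // v1 entries are converted straight to the latest schema.
        1 => v1_to_latest(db, config),
        // v2 entries only need the `OurApproval`/`BlockEntry` changes applied.
        2 => v2_to_latest(db, config),
        // Already at the latest version: nothing to do.
        _ => Ok(()),
    }
}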
@@ -113,42 +101,8 @@ pub fn v1_to_v2(db: Arc, config: Config) -> Result<()> { Ok(()) } -// Checks if the migration doesn't leave the DB in an unsane state. -// This function is to be used in tests. -pub fn v1_to_v2_sanity_check( - db: Arc, - config: Config, - expected_candidates: HashSet, -) -> Result<()> { - let backend = crate::DbBackend::new(db, config); - - let all_blocks = backend - .load_all_blocks() - .unwrap() - .iter() - .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) - .collect::>(); - - let mut candidates = HashSet::new(); - - // Iterate all blocks and approval entries. - for block in all_blocks { - for (_core_index, candidate_hash) in block.candidates() { - // Loading the candidate will also perform the conversion to the updated format and - // return that represantation. - if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { - candidates.insert(candidate_entry.candidate.hash()); - } - } - } - - assert_eq!(candidates, expected_candidates); - - Ok(()) -} - // Fills the db with dummy data in v1 scheme. -pub fn v1_to_v2_fill_test_data( +pub fn v1_fill_test_data( db: Arc, config: Config, dummy_candidate_create: F, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs index 66df6ee8f653..da42fc5be485 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/mod.rs @@ -21,145 +21,23 @@ use polkadot_node_primitives::approval::{v1::DelayTranche, v2::AssignmentCertV2} use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_node_subsystem_util::database::{DBTransaction, Database}; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, - ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, + SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -use crate::{ - backend::{Backend, BackendWriteOp, V1ReadBackend}, - persisted_entries, -}; +use crate::backend::V1ReadBackend; -const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; +use super::common::{block_entry_key, candidate_entry_key, load_decode, Config}; pub mod migration_helpers; #[cfg(test)] pub mod tests; -/// `DbBackend` is a concrete implementation of the higher-level Backend trait -pub struct DbBackend { - inner: Arc, - config: Config, -} - -impl DbBackend { - /// Create a new [`DbBackend`] with the supplied key-value store and - /// config. 
- pub fn new(db: Arc, config: Config) -> Self { - DbBackend { inner: db, config } - } -} - -impl V1ReadBackend for DbBackend { - fn load_candidate_entry_v1( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry_v1(&*self.inner, &self.config, candidate_hash) - .map(|e| e.map(Into::into)) - } - - fn load_block_entry_v1( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry_v1(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } -} - -impl Backend for DbBackend { - fn load_block_entry( - &self, - block_hash: &Hash, - ) -> SubsystemResult> { - load_block_entry(&*self.inner, &self.config, block_hash).map(|e| e.map(Into::into)) - } - - fn load_candidate_entry( - &self, - candidate_hash: &CandidateHash, - ) -> SubsystemResult> { - load_candidate_entry(&*self.inner, &self.config, candidate_hash).map(|e| e.map(Into::into)) - } - - fn load_blocks_at_height(&self, block_height: &BlockNumber) -> SubsystemResult> { - load_blocks_at_height(&*self.inner, &self.config, block_height) - } - - fn load_all_blocks(&self) -> SubsystemResult> { - load_all_blocks(&*self.inner, &self.config) - } - - fn load_stored_blocks(&self) -> SubsystemResult> { - load_stored_blocks(&*self.inner, &self.config) - } - - /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&mut self, ops: I) -> SubsystemResult<()> - where - I: IntoIterator, - { - let mut tx = DBTransaction::new(); - for op in ops { - match op { - BackendWriteOp::WriteStoredBlockRange(stored_block_range) => { - tx.put_vec( - self.config.col_approval_data, - &STORED_BLOCKS_KEY, - stored_block_range.encode(), - ); - }, - BackendWriteOp::DeleteStoredBlockRange => { - tx.delete(self.config.col_approval_data, &STORED_BLOCKS_KEY); - }, - BackendWriteOp::WriteBlocksAtHeight(h, blocks) => { - tx.put_vec( - self.config.col_approval_data, - &blocks_at_height_key(h), - blocks.encode(), - ); - }, - BackendWriteOp::DeleteBlocksAtHeight(h) => { - tx.delete(self.config.col_approval_data, &blocks_at_height_key(h)); - }, - BackendWriteOp::WriteBlockEntry(block_entry) => { - let block_entry: BlockEntry = block_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &block_entry_key(&block_entry.block_hash), - block_entry.encode(), - ); - }, - BackendWriteOp::DeleteBlockEntry(hash) => { - tx.delete(self.config.col_approval_data, &block_entry_key(&hash)); - }, - BackendWriteOp::WriteCandidateEntry(candidate_entry) => { - let candidate_entry: CandidateEntry = candidate_entry.into(); - tx.put_vec( - self.config.col_approval_data, - &candidate_entry_key(&candidate_entry.candidate.hash()), - candidate_entry.encode(), - ); - }, - BackendWriteOp::DeleteCandidateEntry(candidate_hash) => { - tx.delete(self.config.col_approval_data, &candidate_entry_key(&candidate_hash)); - }, - } - } - - self.inner.write(tx).map_err(|e| e.into()) - } -} - -/// A range from earliest..last block number stored within the DB. -#[derive(Encode, Decode, Debug, Clone, PartialEq)] -pub struct StoredBlockRange(pub BlockNumber, pub BlockNumber); - // slot_duration * 2 + DelayTranche gives the number of delay tranches since the // unix epoch. #[derive(Encode, Decode, Clone, Copy, Debug, PartialEq)] @@ -168,13 +46,6 @@ pub struct Tick(u64); /// Convenience type definition pub type Bitfield = BitVec; -/// The database config. -#[derive(Debug, Clone, Copy)] -pub struct Config { - /// The column family in the database where data is stored. 
- pub col_approval_data: u32, -} - /// Details pertaining to our assignment on a block. #[derive(Encode, Decode, Debug, Clone, PartialEq)] pub struct OurAssignment { @@ -259,118 +130,6 @@ impl From for crate::Tick { } } -/// Errors while accessing things from the DB. -#[derive(Debug, derive_more::From, derive_more::Display)] -pub enum Error { - Io(std::io::Error), - InvalidDecoding(parity_scale_codec::Error), - InternalError(SubsystemError), -} - -impl std::error::Error for Error {} - -/// Result alias for DB errors. -pub type Result = std::result::Result; - -pub(crate) fn load_decode( - store: &dyn Database, - col_approval_data: u32, - key: &[u8], -) -> Result> { - match store.get(col_approval_data, key)? { - None => Ok(None), - Some(raw) => D::decode(&mut &raw[..]).map(Some).map_err(Into::into), - } -} - -/// The key a given block entry is stored under. -pub(crate) fn block_entry_key(block_hash: &Hash) -> [u8; 46] { - const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(block_hash.as_ref()); - - key -} - -/// The key a given candidate entry is stored under. -pub(crate) fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { - const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; - - let mut key = [0u8; 14 + 32]; - key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); - key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); - - key -} - -/// The key a set of block hashes corresponding to a block number is stored under. -pub(crate) fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { - const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; - - let mut key = [0u8; 12 + 4]; - key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); - block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); - - key -} - -/// Return all blocks which have entries in the DB, ascending, by height. -pub fn load_all_blocks(store: &dyn Database, config: &Config) -> SubsystemResult> { - let mut hashes = Vec::new(); - if let Some(stored_blocks) = load_stored_blocks(store, config)? { - for height in stored_blocks.0..stored_blocks.1 { - let blocks = load_blocks_at_height(store, config, &height)?; - hashes.extend(blocks); - } - } - - Ok(hashes) -} - -/// Load the stored-blocks key from the state. -pub fn load_stored_blocks( - store: &dyn Database, - config: &Config, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, STORED_BLOCKS_KEY) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a blocks-at-height entry for a given block number. -pub fn load_blocks_at_height( - store: &dyn Database, - config: &Config, - block_number: &BlockNumber, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &blocks_at_height_key(*block_number)) - .map(|x| x.unwrap_or_default()) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a block entry from the aux store. -pub fn load_block_entry( - store: &dyn Database, - config: &Config, - block_hash: &Hash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - -/// Load a candidate entry from the aux store in current version format. 
-pub fn load_candidate_entry( - store: &dyn Database, - config: &Config, - candidate_hash: &CandidateHash, -) -> SubsystemResult> { - load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) - .map(|u: Option| u.map(|v| v.into())) - .map_err(|e| SubsystemError::with_origin("approval-voting", e)) -} - /// Load a candidate entry from the aux store in v1 format. pub fn load_candidate_entry_v1( store: &dyn Database, diff --git a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs index 50a5a924ca8d..6021b44c2765 100644 --- a/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs +++ b/polkadot/node/core/approval-voting/src/approval_db/v2/tests.rs @@ -16,13 +16,22 @@ //! Tests for the aux-schema of approval voting. -use super::{DbBackend, StoredBlockRange, *}; use crate::{ + approval_db::{ + common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *}, + v2::*, + v3::{load_block_entry_v2, load_candidate_entry_v2}, + }, backend::{Backend, OverlayedBackend}, ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, }; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, +}; + use polkadot_node_subsystem_util::database::Database; use polkadot_primitives::Id as ParaId; +use sp_consensus_slots::Slot; use std::{collections::HashMap, sync::Arc}; use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; @@ -60,10 +69,6 @@ fn make_block_entry( } } -fn make_bitvec(len: usize) -> BitVec { - bitvec::bitvec![u8, BitOrderLsb0; 0; len] -} - fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { let mut c = dummy_candidate_receipt(dummy_hash()); @@ -110,7 +115,10 @@ fn read_write() { overlay_db.write_stored_block_range(range.clone()); overlay_db.write_blocks_at_height(1, at_height.clone()); overlay_db.write_block_entry(block_entry.clone().into()); - overlay_db.write_candidate_entry(candidate_entry.clone().into()); + overlay_db.write_candidate_entry(crate::persisted_entries::CandidateEntry::from_v2( + candidate_entry.clone(), + 0, + )); let write_ops = overlay_db.into_write_ops(); db.write(write_ops).unwrap(); @@ -118,11 +126,11 @@ fn read_write() { assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range)); assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), Some(block_entry.into()) ); assert_eq!( - load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), Some(candidate_entry.into()), ); @@ -134,8 +142,8 @@ fn read_write() { db.write(write_ops).unwrap(); assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); - assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash) .unwrap() .is_none()); } @@ -196,25 +204,27 @@ fn add_block_entry_works() { db.write(write_ops).unwrap(); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + 
load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), Some(block_entry_a.into()) ); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), Some(block_entry_b.into()) ); - let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) - .unwrap() - .unwrap(); + let candidate_entry_a = + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) + .unwrap() + .unwrap(); assert_eq!( candidate_entry_a.block_assignments.keys().collect::>(), vec![&block_hash_a, &block_hash_b] ); - let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) - .unwrap() - .unwrap(); + let candidate_entry_b = + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) + .unwrap() + .unwrap(); assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); } @@ -243,11 +253,11 @@ fn add_block_entry_adds_child() { block_entry_a.children.push(block_hash_b); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), Some(block_entry_a.into()) ); assert_eq!( - load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), Some(block_entry_b.into()) ); } @@ -365,13 +375,15 @@ fn canonicalize_works() { for (c_hash, in_blocks) in expected { let (entry, in_blocks) = match in_blocks { None => { - assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash) + assert!(load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash) .unwrap() .is_none()); continue }, Some(i) => ( - load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(), + load_candidate_entry_v2(store.as_ref(), &TEST_CONFIG, &c_hash) + .unwrap() + .unwrap(), i, ), }; @@ -388,13 +400,13 @@ fn canonicalize_works() { for (hash, with_candidates) in expected { let (entry, with_candidates) = match with_candidates { None => { - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash) .unwrap() .is_none()); continue }, Some(i) => - (load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), + (load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), }; assert_eq!(entry.candidates.len(), with_candidates.len()); @@ -510,22 +522,22 @@ fn force_approve_works() { let write_ops = overlay_db.into_write_ops(); db.write(write_ops).unwrap(); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_a,) .unwrap() .unwrap() .approved_bitfield .all()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_b,) .unwrap() .unwrap() .approved_bitfield .all()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_c,) .unwrap() .unwrap() .approved_bitfield .not_any()); - assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,) + assert!(load_block_entry_v2(store.as_ref(), &TEST_CONFIG, &block_hash_d,) .unwrap() .unwrap() .approved_bitfield diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs 
b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs new file mode 100644 index 000000000000..ad5e89ef3de8 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/migration_helpers.rs @@ -0,0 +1,237 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Approval DB migration helpers. +use super::*; +use crate::{ + approval_db::common::{ + block_entry_key, candidate_entry_key, + migration_helpers::{dummy_assignment_cert, make_bitvec}, + Config, Error, Result, StoredBlockRange, + }, + backend::{Backend, V2ReadBackend}, +}; +use polkadot_node_primitives::approval::v1::AssignmentCertKind; +use polkadot_node_subsystem_util::database::Database; +use sp_application_crypto::sp_core::H256; +use std::{collections::HashSet, sync::Arc}; + +/// Migrates `BlockEntry`, `CandidateEntry`, `ApprovalEntry` and `OurApproval` to version 3. +/// Returns on any error. +/// Must only be used in parachains DB migration code - `polkadot-service` crate. +pub fn v2_to_latest(db: Arc, config: Config) -> Result<()> { + let mut backend = crate::DbBackend::new(db, config); + let all_blocks = backend + .load_all_blocks() + .map_err(|e| Error::InternalError(e))? + .iter() + .filter_map(|block_hash| { + backend + .load_block_entry_v2(block_hash) + .map_err(|e| Error::InternalError(e)) + .ok()? + }) + .collect::>(); + + gum::info!( + target: crate::LOG_TARGET, + "Migrating candidate entries on top of {} blocks", + all_blocks.len() + ); + + let mut overlay = crate::OverlayedBackend::new(&backend); + let mut counter = 0; + // Get all candidate entries, approval entries and convert each of them. + for block in all_blocks { + for (candidate_index, (_core_index, candidate_hash)) in + block.candidates().iter().enumerate() + { + // Loading the candidate will also perform the conversion to the updated format and + // return that represantation. + if let Some(candidate_entry) = backend + .load_candidate_entry_v2(&candidate_hash, candidate_index as CandidateIndex) + .map_err(|e| Error::InternalError(e))? + { + // Write the updated representation. + overlay.write_candidate_entry(candidate_entry); + counter += 1; + } + } + overlay.write_block_entry(block); + } + + gum::info!(target: crate::LOG_TARGET, "Migrated {} entries", counter); + + // Commit all changes to DB. + let write_ops = overlay.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(()) +} + +// Checks if the migration doesn't leave the DB in an unsane state. +// This function is to be used in tests. 
+pub fn v1_to_latest_sanity_check( + db: Arc, + config: Config, + expected_candidates: HashSet, +) -> Result<()> { + let backend = crate::DbBackend::new(db, config); + + let all_blocks = backend + .load_all_blocks() + .unwrap() + .iter() + .map(|block_hash| backend.load_block_entry(block_hash).unwrap().unwrap()) + .collect::>(); + + let mut candidates = HashSet::new(); + + // Iterate all blocks and approval entries. + for block in all_blocks { + for (_core_index, candidate_hash) in block.candidates() { + // Loading the candidate will also perform the conversion to the updated format and + // return that represantation. + if let Some(candidate_entry) = backend.load_candidate_entry(&candidate_hash).unwrap() { + candidates.insert(candidate_entry.candidate.hash()); + } + } + } + + assert_eq!(candidates, expected_candidates); + + Ok(()) +} + +// Fills the db with dummy data in v2 scheme. +pub fn v2_fill_test_data( + db: Arc, + config: Config, + dummy_candidate_create: F, +) -> Result> +where + F: Fn(H256) -> CandidateReceipt, +{ + let mut backend = crate::DbBackend::new(db.clone(), config); + let mut overlay_db = crate::OverlayedBackend::new(&backend); + let mut expected_candidates = HashSet::new(); + + const RELAY_BLOCK_COUNT: u32 = 10; + + let range = StoredBlockRange(1, 11); + overlay_db.write_stored_block_range(range.clone()); + + for relay_number in 1..=RELAY_BLOCK_COUNT { + let relay_hash = Hash::repeat_byte(relay_number as u8); + let assignment_core_index = CoreIndex(relay_number); + let candidate = dummy_candidate_create(relay_hash); + let candidate_hash = candidate.hash(); + + let at_height = vec![relay_hash]; + + let block_entry = make_block_entry_v2( + relay_hash, + Default::default(), + relay_number, + vec![(assignment_core_index, candidate_hash)], + ); + + let dummy_assignment = crate::approval_db::v2::OurAssignment { + cert: dummy_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }).into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + }; + + let candidate_entry = crate::approval_db::v2::CandidateEntry { + candidate, + session: 123, + block_assignments: vec![( + relay_hash, + crate::approval_db::v2::ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: Some(dummy_assignment), + our_approval_sig: None, + approved: false, + assigned_validators: make_bitvec(1), + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + overlay_db.write_blocks_at_height(relay_number, at_height.clone()); + expected_candidates.insert(candidate_entry.candidate.hash()); + + db.write(write_candidate_entry_v2(candidate_entry, config)).unwrap(); + db.write(write_block_entry_v2(block_entry, config)).unwrap(); + } + + let write_ops = overlay_db.into_write_ops(); + backend.write(write_ops).unwrap(); + + Ok(expected_candidates) +} + +fn make_block_entry_v2( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> crate::approval_db::v2::BlockEntry { + crate::approval_db::v2::BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + distributed_assignments: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + } +} + +// Low level DB helper to write a candidate entry in v1 scheme. 
+fn write_candidate_entry_v2( + candidate_entry: crate::approval_db::v2::CandidateEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &candidate_entry_key(&candidate_entry.candidate.hash()), + candidate_entry.encode(), + ); + tx +} + +// Low level DB helper to write a block entry in v1 scheme. +fn write_block_entry_v2( + block_entry: crate::approval_db::v2::BlockEntry, + config: Config, +) -> DBTransaction { + let mut tx = DBTransaction::new(); + tx.put_vec( + config.col_approval_data, + &block_entry_key(&block_entry.block_hash), + block_entry.encode(), + ); + tx +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs new file mode 100644 index 000000000000..3e4f43021952 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/mod.rs @@ -0,0 +1,137 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Version 3 of the DB schema. +//! +//! Version 3 modifies the `our_approval` format of `ApprovalEntry` +//! and adds a new field `pending_signatures` for `BlockEntry` + +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_primitives::approval::v2::CandidateBitfield; +use polkadot_node_subsystem::SubsystemResult; +use polkadot_node_subsystem_util::database::{DBTransaction, Database}; +use polkadot_overseer::SubsystemError; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, + SessionIndex, ValidatorIndex, ValidatorSignature, +}; + +use sp_consensus_slots::Slot; + +use std::collections::BTreeMap; + +use super::common::{block_entry_key, candidate_entry_key, load_decode, Config}; + +/// Re-export this structs as v3 since they did not change between v2 and v3. +pub use super::v2::{Bitfield, OurAssignment, Tick, TrancheEntry}; + +pub mod migration_helpers; + +#[cfg(test)] +pub mod tests; + +/// Metadata about our approval signature +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct OurApproval { + /// The signature for the candidates hashes pointed by indices. + pub signature: ValidatorSignature, + /// The indices of the candidates signed in this approval. + pub signed_candidates_indices: CandidateBitfield, +} + +/// Metadata regarding approval of a particular candidate within the context of some +/// particular block. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct ApprovalEntry { + pub tranches: Vec, + pub backing_group: GroupIndex, + pub our_assignment: Option, + pub our_approval_sig: Option, + // `n_validators` bits. + pub assigned_validators: Bitfield, + pub approved: bool, +} + +/// Metadata regarding approval of a particular candidate. 
+#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct CandidateEntry { + pub candidate: CandidateReceipt, + pub session: SessionIndex, + // Assignments are based on blocks, so we need to track assignments separately + // based on the block we are looking at. + pub block_assignments: BTreeMap, + pub approvals: Bitfield, +} + +/// Metadata regarding approval of a particular block, by way of approval of the +/// candidates contained within it. +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +pub struct BlockEntry { + pub block_hash: Hash, + pub block_number: BlockNumber, + pub parent_hash: Hash, + pub session: SessionIndex, + pub slot: Slot, + /// Random bytes derived from the VRF submitted within the block by the block + /// author as a credential and used as input to approval assignment criteria. + pub relay_vrf_story: [u8; 32], + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + pub candidates: Vec<(CoreIndex, CandidateHash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. + // The i'th bit is `true` iff the candidate has been approved in the context of this + // block. The block can be considered approved if the bitfield has all bits set to `true`. + pub approved_bitfield: Bitfield, + pub children: Vec, + // A list of candidates we have checked, but didn't not sign and + // advertise the vote yet. + pub candidates_pending_signature: BTreeMap, + // Assignments we already distributed. A 1 bit means the candidate index for which + // we already have sent out an assignment. We need this to avoid distributing + // multiple core assignments more than once. + pub distributed_assignments: Bitfield, +} + +#[derive(Encode, Decode, Debug, Clone, PartialEq)] +/// Context needed for creating an approval signature for a given candidate. +pub struct CandidateSigningContext { + /// The candidate hash, to be included in the signature. + pub candidate_hash: CandidateHash, + /// The latest tick we have to create and send the approval. + pub sign_no_later_than_tick: Tick, +} + +/// Load a candidate entry from the aux store in v2 format. +pub fn load_candidate_entry_v2( + store: &dyn Database, + config: &Config, + candidate_hash: &CandidateHash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &candidate_entry_key(candidate_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} + +/// Load a block entry from the aux store in v2 format. +pub fn load_block_entry_v2( + store: &dyn Database, + config: &Config, + block_hash: &Hash, +) -> SubsystemResult> { + load_decode(store, config.col_approval_data, &block_entry_key(block_hash)) + .map(|u: Option| u.map(|v| v.into())) + .map_err(|e| SubsystemError::with_origin("approval-voting", e)) +} diff --git a/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs new file mode 100644 index 000000000000..08c65461bca8 --- /dev/null +++ b/polkadot/node/core/approval-voting/src/approval_db/v3/tests.rs @@ -0,0 +1,575 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the aux-schema of approval voting. + +use crate::{ + approval_db::{ + common::{migration_helpers::make_bitvec, DbBackend, StoredBlockRange, *}, + v3::*, + }, + backend::{Backend, OverlayedBackend}, + ops::{add_block_entry, canonicalize, force_approve, NewCandidateInfo}, +}; +use polkadot_primitives::{ + BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, +}; + +use polkadot_node_subsystem_util::database::Database; +use polkadot_primitives::Id as ParaId; +use sp_consensus_slots::Slot; +use std::{collections::HashMap, sync::Arc}; + +use ::test_helpers::{dummy_candidate_receipt, dummy_candidate_receipt_bad_sig, dummy_hash}; + +const DATA_COL: u32 = 0; + +const NUM_COLUMNS: u32 = 1; + +const TEST_CONFIG: Config = Config { col_approval_data: DATA_COL }; + +fn make_db() -> (DbBackend, Arc) { + let db = kvdb_memorydb::create(NUM_COLUMNS); + let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]); + let db_writer: Arc = Arc::new(db); + (DbBackend::new(db_writer.clone(), TEST_CONFIG), db_writer) +} + +fn make_block_entry( + block_hash: Hash, + parent_hash: Hash, + block_number: BlockNumber, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> BlockEntry { + BlockEntry { + block_hash, + parent_hash, + block_number, + session: 1, + slot: Slot::from(1), + relay_vrf_story: [0u8; 32], + approved_bitfield: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + candidates_pending_signature: Default::default(), + distributed_assignments: Default::default(), + } +} + +fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { + let mut c = dummy_candidate_receipt(dummy_hash()); + + c.descriptor.para_id = para_id; + c.descriptor.relay_parent = relay_parent; + + c +} + +#[test] +fn read_write() { + let (mut db, store) = make_db(); + + let hash_a = Hash::repeat_byte(1); + let hash_b = Hash::repeat_byte(2); + let candidate_hash = dummy_candidate_receipt_bad_sig(dummy_hash(), None).hash(); + + let range = StoredBlockRange(10, 20); + let at_height = vec![hash_a, hash_b]; + + let block_entry = + make_block_entry(hash_a, Default::default(), 1, vec![(CoreIndex(0), candidate_hash)]); + + let candidate_entry = CandidateEntry { + candidate: dummy_candidate_receipt_bad_sig(dummy_hash(), None), + session: 5, + block_assignments: vec![( + hash_a, + ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + our_assignment: None, + our_approval_sig: None, + assigned_validators: Default::default(), + approved: false, + }, + )] + .into_iter() + .collect(), + approvals: Default::default(), + }; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(range.clone()); + overlay_db.write_blocks_at_height(1, at_height.clone()); + overlay_db.write_block_entry(block_entry.clone().into()); + overlay_db.write_candidate_entry(candidate_entry.clone().into()); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!(load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), Some(range)); + assert_eq!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap(), at_height); + assert_eq!( + 
load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap(), + Some(block_entry.into()) + ); + assert_eq!( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash).unwrap(), + Some(candidate_entry.into()), + ); + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.delete_blocks_at_height(1); + overlay_db.delete_block_entry(&hash_a); + overlay_db.delete_candidate_entry(&candidate_hash); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert!(load_blocks_at_height(store.as_ref(), &TEST_CONFIG, &1).unwrap().is_empty()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash) + .unwrap() + .is_none()); +} + +#[test] +fn add_block_entry_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let candidate_receipt_a = make_candidate(ParaId::from(1_u32), parent_hash); + let candidate_receipt_b = make_candidate(ParaId::from(2_u32), parent_hash); + + let candidate_hash_a = candidate_receipt_a.hash(); + let candidate_hash_b = candidate_receipt_b.hash(); + + let block_number = 10; + + let block_entry_a = make_block_entry( + block_hash_a, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a)], + ); + + let block_entry_b = make_block_entry( + block_hash_b, + parent_hash, + block_number, + vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)], + ); + + let n_validators = 10; + + let mut new_candidate_info = HashMap::new(); + new_candidate_info + .insert(candidate_hash_a, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(0), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + new_candidate_info + .insert(candidate_hash_b, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(1), None)); + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |h| { + new_candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + Some(block_entry_a.into()) + ); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + Some(block_entry_b.into()) + ); + + let candidate_entry_a = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_a) + .unwrap() + .unwrap(); + assert_eq!( + candidate_entry_a.block_assignments.keys().collect::>(), + vec![&block_hash_a, &block_hash_b] + ); + + let candidate_entry_b = load_candidate_entry(store.as_ref(), &TEST_CONFIG, &candidate_hash_b) + .unwrap() + .unwrap(); + assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); +} + +#[test] +fn add_block_entry_adds_child() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let mut block_entry_a = make_block_entry(block_hash_a, parent_hash, 1, Vec::new()); + + let block_entry_b = make_block_entry(block_hash_b, block_hash_a, 2, Vec::new()); + + 
let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap(); + + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + block_entry_a.children.push(block_hash_b); + + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a).unwrap(), + Some(block_entry_a.into()) + ); + assert_eq!( + load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b).unwrap(), + Some(block_entry_b.into()) + ); +} + +#[test] +fn canonicalize_works() { + let (mut db, store) = make_db(); + + // -> B1 -> C1 -> D1 + // A -> B2 -> C2 -> D2 + // + // We'll canonicalize C1. Everytning except D1 should disappear. + // + // Candidates: + // Cand1 in B2 + // Cand2 in C2 + // Cand3 in C2 and D1 + // Cand4 in D1 + // Cand5 in D2 + // Only Cand3 and Cand4 should remain after canonicalize. + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(StoredBlockRange(1, 5)); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let genesis = Hash::repeat_byte(0); + + let block_hash_a = Hash::repeat_byte(1); + let block_hash_b1 = Hash::repeat_byte(2); + let block_hash_b2 = Hash::repeat_byte(3); + let block_hash_c1 = Hash::repeat_byte(4); + let block_hash_c2 = Hash::repeat_byte(5); + let block_hash_d1 = Hash::repeat_byte(6); + let block_hash_d2 = Hash::repeat_byte(7); + + let candidate_receipt_genesis = make_candidate(ParaId::from(1_u32), genesis); + let candidate_receipt_a = make_candidate(ParaId::from(2_u32), block_hash_a); + let candidate_receipt_b = make_candidate(ParaId::from(3_u32), block_hash_a); + let candidate_receipt_b1 = make_candidate(ParaId::from(4_u32), block_hash_b1); + let candidate_receipt_c1 = make_candidate(ParaId::from(5_u32), block_hash_c1); + + let cand_hash_1 = candidate_receipt_genesis.hash(); + let cand_hash_2 = candidate_receipt_a.hash(); + let cand_hash_3 = candidate_receipt_b.hash(); + let cand_hash_4 = candidate_receipt_b1.hash(); + let cand_hash_5 = candidate_receipt_c1.hash(); + + let block_entry_a = make_block_entry(block_hash_a, genesis, 1, Vec::new()); + let block_entry_b1 = make_block_entry(block_hash_b1, block_hash_a, 2, Vec::new()); + let block_entry_b2 = + make_block_entry(block_hash_b2, block_hash_a, 2, vec![(CoreIndex(0), cand_hash_1)]); + let block_entry_c1 = make_block_entry(block_hash_c1, block_hash_b1, 3, Vec::new()); + let block_entry_c2 = make_block_entry( + block_hash_c2, + block_hash_b2, + 3, + vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)], + ); + let block_entry_d1 = make_block_entry( + block_hash_d1, + block_hash_c1, + 4, + vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)], + ); + let block_entry_d2 = + make_block_entry(block_hash_d2, block_hash_c2, 4, vec![(CoreIndex(0), cand_hash_5)]); + + let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert( + cand_hash_1, + NewCandidateInfo::new(candidate_receipt_genesis, GroupIndex(1), None), + ); + + candidate_info + .insert(cand_hash_2, NewCandidateInfo::new(candidate_receipt_a, GroupIndex(2), None)); + + candidate_info + .insert(cand_hash_3, NewCandidateInfo::new(candidate_receipt_b, GroupIndex(3), None)); + + candidate_info + .insert(cand_hash_4, NewCandidateInfo::new(candidate_receipt_b1, GroupIndex(4), None)); + + candidate_info + 
.insert(cand_hash_5, NewCandidateInfo::new(candidate_receipt_c1, GroupIndex(5), None)); + + candidate_info + }; + + // now insert all the blocks. + let blocks = vec![ + block_entry_a.clone(), + block_entry_b1.clone(), + block_entry_b2.clone(), + block_entry_c1.clone(), + block_entry_c2.clone(), + block_entry_d1.clone(), + block_entry_d2.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let check_candidates_in_store = |expected: Vec<(CandidateHash, Option>)>| { + for (c_hash, in_blocks) in expected { + let (entry, in_blocks) = match in_blocks { + None => { + assert!(load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => ( + load_candidate_entry(store.as_ref(), &TEST_CONFIG, &c_hash).unwrap().unwrap(), + i, + ), + }; + + assert_eq!(entry.block_assignments.len(), in_blocks.len()); + + for x in in_blocks { + assert!(entry.block_assignments.contains_key(&x)); + } + } + }; + + let check_blocks_in_store = |expected: Vec<(Hash, Option>)>| { + for (hash, with_candidates) in expected { + let (entry, with_candidates) = match with_candidates { + None => { + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &hash) + .unwrap() + .is_none()); + continue + }, + Some(i) => + (load_block_entry(store.as_ref(), &TEST_CONFIG, &hash).unwrap().unwrap(), i), + }; + + assert_eq!(entry.candidates.len(), with_candidates.len()); + + for x in with_candidates { + assert!(entry.candidates.iter().any(|(_, c)| c == &x)); + } + } + }; + + check_candidates_in_store(vec![ + (cand_hash_1, Some(vec![block_hash_b2])), + (cand_hash_2, Some(vec![block_hash_c2])), + (cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, Some(vec![block_hash_d2])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, Some(vec![])), + (block_hash_b1, Some(vec![])), + (block_hash_b2, Some(vec![cand_hash_1])), + (block_hash_c1, Some(vec![])), + (block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, Some(vec![cand_hash_5])), + ]); + + let mut overlay_db = OverlayedBackend::new(&db); + canonicalize(&mut overlay_db, 3, block_hash_c1).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_stored_blocks(store.as_ref(), &TEST_CONFIG).unwrap().unwrap(), + StoredBlockRange(4, 5) + ); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, Some(vec![block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, None), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, None), + ]); +} + +#[test] +fn force_approve_works() { + let (mut db, store) = make_db(); + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + overlay_db.write_stored_block_range(StoredBlockRange(1, 4)); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let single_candidate_vec = vec![(CoreIndex(0), candidate_hash)]; 
+ let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert( + candidate_hash, + NewCandidateInfo::new( + make_candidate(ParaId::from(1_u32), Default::default()), + GroupIndex(1), + None, + ), + ); + + candidate_info + }; + + let block_hash_a = Hash::repeat_byte(1); // 1 + let block_hash_b = Hash::repeat_byte(2); + let block_hash_c = Hash::repeat_byte(3); + let block_hash_d = Hash::repeat_byte(4); // 4 + + let block_entry_a = + make_block_entry(block_hash_a, Default::default(), 1, single_candidate_vec.clone()); + let block_entry_b = + make_block_entry(block_hash_b, block_hash_a, 2, single_candidate_vec.clone()); + let block_entry_c = + make_block_entry(block_hash_c, block_hash_b, 3, single_candidate_vec.clone()); + let block_entry_d = + make_block_entry(block_hash_d, block_hash_c, 4, single_candidate_vec.clone()); + + let blocks = vec![ + block_entry_a.clone(), + block_entry_b.clone(), + block_entry_c.clone(), + block_entry_d.clone(), + ]; + + let mut overlay_db = OverlayedBackend::new(&db); + for block_entry in blocks { + add_block_entry(&mut overlay_db, block_entry.into(), n_validators, |h| { + candidate_info.get(h).map(|x| x.clone()) + }) + .unwrap(); + } + let approved_hashes = force_approve(&mut overlay_db, block_hash_d, 2).unwrap(); + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_a,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_b,) + .unwrap() + .unwrap() + .approved_bitfield + .all()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_c,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert!(load_block_entry(store.as_ref(), &TEST_CONFIG, &block_hash_d,) + .unwrap() + .unwrap() + .approved_bitfield + .not_any()); + assert_eq!(approved_hashes, vec![block_hash_b, block_hash_a]); +} + +#[test] +fn load_all_blocks_works() { + let (mut db, store) = make_db(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + let block_hash_c = Hash::repeat_byte(42); + + let block_number = 10; + + let block_entry_a = make_block_entry(block_hash_a, parent_hash, block_number, vec![]); + + let block_entry_b = make_block_entry(block_hash_b, parent_hash, block_number, vec![]); + + let block_entry_c = make_block_entry(block_hash_c, block_hash_a, block_number + 1, vec![]); + + let n_validators = 10; + + let mut overlay_db = OverlayedBackend::new(&db); + add_block_entry(&mut overlay_db, block_entry_a.clone().into(), n_validators, |_| None).unwrap(); + + // add C before B to test sorting. + add_block_entry(&mut overlay_db, block_entry_c.clone().into(), n_validators, |_| None).unwrap(); + + add_block_entry(&mut overlay_db, block_entry_b.clone().into(), n_validators, |_| None).unwrap(); + + let write_ops = overlay_db.into_write_ops(); + db.write(write_ops).unwrap(); + + assert_eq!( + load_all_blocks(store.as_ref(), &TEST_CONFIG).unwrap(), + vec![block_hash_a, block_hash_b, block_hash_c], + ) +} diff --git a/polkadot/node/core/approval-voting/src/backend.rs b/polkadot/node/core/approval-voting/src/backend.rs index d98f3c5fd202..9ce25334c0fa 100644 --- a/polkadot/node/core/approval-voting/src/backend.rs +++ b/polkadot/node/core/approval-voting/src/backend.rs @@ -22,12 +22,12 @@ //! before any commit to the underlying storage is made. 
use polkadot_node_subsystem::SubsystemResult; -use polkadot_primitives::{BlockNumber, CandidateHash, Hash}; +use polkadot_primitives::{BlockNumber, CandidateHash, CandidateIndex, Hash}; use std::collections::HashMap; use super::{ - approval_db::v2::StoredBlockRange, + approval_db::common::StoredBlockRange, persisted_entries::{BlockEntry, CandidateEntry}, }; @@ -72,12 +72,26 @@ pub trait V1ReadBackend: Backend { fn load_candidate_entry_v1( &self, candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, ) -> SubsystemResult>; /// Load a block entry from the DB with scheme version 1. fn load_block_entry_v1(&self, block_hash: &Hash) -> SubsystemResult>; } +/// A read only backend to enable db migration from version 2 of DB. +pub trait V2ReadBackend: Backend { + /// Load a candidate entry from the DB with scheme version 1. + fn load_candidate_entry_v2( + &self, + candidate_hash: &CandidateHash, + candidate_index: CandidateIndex, + ) -> SubsystemResult>; + + /// Load a block entry from the DB with scheme version 1. + fn load_block_entry_v2(&self, block_hash: &Hash) -> SubsystemResult>; +} + // Status of block range in the `OverlayedBackend`. #[derive(PartialEq)] enum BlockRangeStatus { diff --git a/polkadot/node/core/approval-voting/src/import.rs b/polkadot/node/core/approval-voting/src/import.rs index a64685326cf9..7a56e9fd1129 100644 --- a/polkadot/node/core/approval-voting/src/import.rs +++ b/polkadot/node/core/approval-voting/src/import.rs @@ -56,7 +56,7 @@ use futures::{channel::oneshot, prelude::*}; use std::collections::HashMap; -use super::approval_db::v2; +use super::approval_db::v3; use crate::{ backend::{Backend, OverlayedBackend}, criteria::{AssignmentCriteria, OurAssignment}, @@ -512,7 +512,7 @@ pub(crate) async fn handle_new_head( ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await; } - let block_entry = v2::BlockEntry { + let block_entry = v3::BlockEntry { block_hash, parent_hash: block_header.parent_hash, block_number: block_header.number, @@ -525,6 +525,7 @@ pub(crate) async fn handle_new_head( .collect(), approved_bitfield, children: Vec::new(), + candidates_pending_signature: Default::default(), distributed_assignments: Default::default(), }; @@ -604,7 +605,10 @@ pub(crate) async fn handle_new_head( #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{approval_db::v2::DbBackend, RuntimeInfo, RuntimeInfoConfig}; + use crate::{ + approval_db::common::{load_block_entry, DbBackend}, + RuntimeInfo, RuntimeInfoConfig, + }; use ::test_helpers::{dummy_candidate_receipt, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_primitives::{ @@ -627,7 +631,7 @@ pub(crate) mod tests { pub(crate) use sp_runtime::{Digest, DigestItem}; use std::{pin::Pin, sync::Arc}; - use crate::{approval_db::v2::Config as DatabaseConfig, criteria, BlockEntry}; + use crate::{approval_db::common::Config as DatabaseConfig, criteria, BlockEntry}; const DATA_COL: u32 = 0; @@ -1347,7 +1351,7 @@ pub(crate) mod tests { let (state, mut session_info_provider) = single_session_state(); overlay_db.write_block_entry( - v2::BlockEntry { + v3::BlockEntry { block_hash: parent_hash, parent_hash: Default::default(), block_number: 4, @@ -1357,6 +1361,7 @@ pub(crate) mod tests { candidates: Vec::new(), approved_bitfield: Default::default(), children: Vec::new(), + candidates_pending_signature: Default::default(), distributed_assignments: Default::default(), } .into(), @@ -1389,11 +1394,10 @@ pub(crate) mod tests { assert_eq!(candidates[1].1.approvals().len(), 6); // the first 
candidate should be insta-approved // the second should not - let entry: BlockEntry = - v2::load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) - .unwrap() - .unwrap() - .into(); + let entry: BlockEntry = load_block_entry(db_writer.as_ref(), &TEST_CONFIG, &hash) + .unwrap() + .unwrap() + .into(); assert!(entry.is_candidate_approved(&candidates[0].0)); assert!(!entry.is_candidate_approved(&candidates[1].0)); }) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index ef01727b7eb6..af76b576d7ca 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -21,14 +21,15 @@ //! of others. It uses this information to determine when candidates and blocks have //! been sufficiently approved to finalize. +use itertools::Itertools; use jaeger::{hash_to_trace_identifier, PerLeafSpan}; use polkadot_node_jaeger as jaeger; use polkadot_node_primitives::{ approval::{ - v1::{BlockApprovalMeta, DelayTranche, IndirectSignedApprovalVote}, + v1::{BlockApprovalMeta, DelayTranche}, v2::{ AssignmentCertKindV2, BitfieldError, CandidateBitfield, CoreBitfield, - IndirectAssignmentCertV2, + IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2, }, }, ValidationResult, DISPUTE_WINDOW, @@ -53,9 +54,10 @@ use polkadot_node_subsystem_util::{ TimeoutExt, }; use polkadot_primitives::{ - ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + vstaging::{ApprovalVoteMultipleCandidates, ApprovalVotingParams}, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, ExecutorParams, + GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, + ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair; @@ -67,9 +69,11 @@ use futures::{ future::{BoxFuture, RemoteHandle}, prelude::*, stream::FuturesUnordered, + StreamExt, }; use std::{ + cmp::min, collections::{ btree_map::Entry as BTMEntry, hash_map::Entry as HMEntry, BTreeMap, HashMap, HashSet, }, @@ -83,7 +87,7 @@ use approval_checking::RequiredTranches; use bitvec::{order::Lsb0, vec::BitVec}; use criteria::{AssignmentCriteria, RealAssignmentCriteria}; use persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}; -use time::{slot_number_to_tick, Clock, ClockExt, SystemClock, Tick}; +use time::{slot_number_to_tick, Clock, ClockExt, DelayedApprovalTimer, SystemClock, Tick}; mod approval_checking; pub mod approval_db; @@ -95,9 +99,11 @@ mod persisted_entries; mod time; use crate::{ - approval_db::v2::{Config as DatabaseConfig, DbBackend}, + approval_checking::{Check, TranchesToApproveResult}, + approval_db::common::{Config as DatabaseConfig, DbBackend}, backend::{Backend, OverlayedBackend}, criteria::InvalidAssignmentReason, + persisted_entries::OurApproval, }; #[cfg(test)] @@ -115,6 +121,9 @@ const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds. 
const APPROVAL_DELAY: Tick = 2; pub(crate) const LOG_TARGET: &str = "parachain::approval-voting"; +// The max number of ticks we delay sending the approval after we are ready to issue the approval +const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12; + /// Configuration for the approval voting subsystem #[derive(Debug, Clone)] pub struct Config { @@ -158,7 +167,14 @@ struct MetricsInner { assignments_produced: prometheus::Histogram, approvals_produced_total: prometheus::CounterVec, no_shows_total: prometheus::Counter, + // The difference from `no_shows_total` is that this counts all observed no-shows at any + // moment in time. While `no_shows_total` catches that the no-shows at the moment the candidate + // is approved, approvals might arrive late and `no_shows_total` wouldn't catch that number. + observed_no_shows: prometheus::Counter, + approved_by_one_third: prometheus::Counter, wakeups_triggered_total: prometheus::Counter, + coalesced_approvals_buckets: prometheus::Histogram, + coalesced_approvals_delay: prometheus::Histogram, candidate_approval_time_ticks: prometheus::Histogram, block_approval_time_ticks: prometheus::Histogram, time_db_transaction: prometheus::Histogram, @@ -184,6 +200,22 @@ impl Metrics { } } + fn on_approval_coalesce(&self, num_coalesced: u32) { + if let Some(metrics) = &self.0 { + // Count how many candidates we covered with this coalesced approvals, + // so that the heat-map really gives a good understanding of the scales. + for _ in 0..num_coalesced { + metrics.coalesced_approvals_buckets.observe(num_coalesced as f64) + } + } + } + + fn on_delayed_approval(&self, delayed_ticks: u64) { + if let Some(metrics) = &self.0 { + metrics.coalesced_approvals_delay.observe(delayed_ticks as f64) + } + } + fn on_approval_stale(&self) { if let Some(metrics) = &self.0 { metrics.approvals_produced_total.with_label_values(&["stale"]).inc() @@ -220,6 +252,18 @@ impl Metrics { } } + fn on_observed_no_shows(&self, n: usize) { + if let Some(metrics) = &self.0 { + metrics.observed_no_shows.inc_by(n as u64); + } + } + + fn on_approved_by_one_third(&self) { + if let Some(metrics) = &self.0 { + metrics.approved_by_one_third.inc(); + } + } + fn on_wakeup(&self) { if let Some(metrics) = &self.0 { metrics.wakeups_triggered_total.inc(); @@ -297,6 +341,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + observed_no_shows: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approvals_observed_no_shows_total", + "Number of observed no shows at any moment in time", + )?, + registry, + )?, wakeups_triggered_total: prometheus::register( prometheus::Counter::new( "polkadot_parachain_approvals_wakeups_total", @@ -313,6 +364,31 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + coalesced_approvals_buckets: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_approvals_coalesced_approvals_buckets", + "Number of coalesced approvals.", + ).buckets(vec![1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5]), + )?, + registry, + )?, + coalesced_approvals_delay: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_approvals_coalescing_delay", + "Number of ticks we delay the sending of a candidate approval", + ).buckets(vec![1.1, 2.1, 3.1, 4.1, 6.1, 8.1, 12.1, 20.1, 32.1]), + )?, + registry, + )?, + approved_by_one_third: prometheus::register( + prometheus::Counter::new( + "polkadot_parachain_approved_by_one_third", + "Number of candidates where more than one 
third had to vote ", + )?, + registry, + )?, block_approval_time_ticks: prometheus::register( prometheus::Histogram::with_opts( prometheus::HistogramOpts::new( @@ -383,8 +459,8 @@ impl ApprovalVotingSubsystem { /// The operation is not allowed for blocks older than the last finalized one. pub fn revert_to(&self, hash: Hash) -> Result<(), SubsystemError> { let config = - approval_db::v2::Config { col_approval_data: self.db_config.col_approval_data }; - let mut backend = approval_db::v2::DbBackend::new(self.db.clone(), config); + approval_db::common::Config { col_approval_data: self.db_config.col_approval_data }; + let mut backend = approval_db::common::DbBackend::new(self.db.clone(), config); let mut overlay = OverlayedBackend::new(&backend); ops::revert_to(&mut overlay, hash)?; @@ -559,6 +635,7 @@ struct ApprovalStatus { required_tranches: RequiredTranches, tranche_now: DelayTranche, block_tick: Tick, + last_no_shows: usize, } #[derive(Copy, Clone)] @@ -733,22 +810,73 @@ impl State { ); if let Some(approval_entry) = candidate_entry.approval_entry(&block_hash) { - let required_tranches = approval_checking::tranches_to_approve( - approval_entry, - candidate_entry.approvals(), - tranche_now, - block_tick, - no_show_duration, - session_info.needed_approvals as _, - ); + let TranchesToApproveResult { required_tranches, total_observed_no_shows } = + approval_checking::tranches_to_approve( + approval_entry, + candidate_entry.approvals(), + tranche_now, + block_tick, + no_show_duration, + session_info.needed_approvals as _, + ); - let status = ApprovalStatus { required_tranches, block_tick, tranche_now }; + let status = ApprovalStatus { + required_tranches, + block_tick, + tranche_now, + last_no_shows: total_observed_no_shows, + }; Some((approval_entry, status)) } else { None } } + + // Returns the approval voting params from the RuntimeApi. 
+ #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] + async fn get_approval_voting_params_or_default( + &self, + ctx: &mut Context, + session_index: SessionIndex, + block_hash: Hash, + ) -> Option { + let (s_tx, s_rx) = oneshot::channel(); + + ctx.send_message(RuntimeApiMessage::Request( + block_hash, + RuntimeApiRequest::ApprovalVotingParams(session_index, s_tx), + )) + .await; + + match s_rx.await { + Ok(Ok(params)) => { + gum::trace!( + target: LOG_TARGET, + approval_voting_params = ?params, + session = ?session_index, + "Using the following subsystem params" + ); + Some(params) + }, + Ok(Err(err)) => { + gum::debug!( + target: LOG_TARGET, + ?err, + "Could not request approval voting params from runtime" + ); + None + }, + Err(err) => { + gum::debug!( + target: LOG_TARGET, + ?err, + "Could not request approval voting params from runtime" + ); + None + }, + } + } } #[derive(Debug, Clone)] @@ -807,6 +935,7 @@ where }); let mut wakeups = Wakeups::default(); let mut currently_checking_set = CurrentlyCheckingSet::default(); + let mut delayed_approvals_timers = DelayedApprovalTimer::default(); let mut approvals_cache = LruMap::new(ByLength::new(APPROVAL_CACHE_SIZE)); let mut last_finalized_height: Option = { @@ -885,17 +1014,49 @@ where } actions + }, + (block_hash, validator_index) = delayed_approvals_timers.select_next_some() => { + gum::debug!( + target: LOG_TARGET, + ?block_hash, + ?validator_index, + "Sign approval for multiple candidates", + ); + + match maybe_create_signature( + &mut overlayed_db, + &mut session_info_provider, + &state, + &mut ctx, + block_hash, + validator_index, + &subsystem.metrics, + ).await { + Ok(Some(next_wakeup)) => { + delayed_approvals_timers.maybe_arm_timer(next_wakeup, state.clock.as_ref(), block_hash, validator_index); + }, + Ok(None) => {} + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "Failed to create signature", + ); + } + } + vec![] } }; if handle_actions( &mut ctx, - &state, + &mut state, &mut overlayed_db, &mut session_info_provider, &subsystem.metrics, &mut wakeups, &mut currently_checking_set, + &mut delayed_approvals_timers, &mut approvals_cache, &mut subsystem.mode, actions, @@ -937,12 +1098,13 @@ where #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn handle_actions( ctx: &mut Context, - state: &State, + state: &mut State, overlayed_db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, wakeups: &mut Wakeups, currently_checking_set: &mut CurrentlyCheckingSet, + delayed_approvals_timers: &mut DelayedApprovalTimer, approvals_cache: &mut LruMap, mode: &mut Mode, actions: Vec, @@ -973,6 +1135,7 @@ async fn handle_actions( session_info_provider, metrics, candidate_hash, + delayed_approvals_timers, approval_request, ) .await? 
@@ -1075,7 +1238,11 @@ async fn handle_actions( Action::BecomeActive => { *mode = Mode::Active; - let messages = distribution_messages_for_activation(overlayed_db, state)?; + let messages = distribution_messages_for_activation( + overlayed_db, + state, + delayed_approvals_timers, + )?; ctx.send_messages(messages.into_iter()).await; }, @@ -1101,7 +1268,7 @@ fn cores_to_candidate_indices( .iter() .position(|(core_index, _)| core_index.0 == claimed_core_index as u32) { - candidate_indices.push(candidate_index as CandidateIndex); + candidate_indices.push(candidate_index as _); } } @@ -1134,6 +1301,7 @@ fn get_assignment_core_indices( fn distribution_messages_for_activation( db: &OverlayedBackend<'_, impl Backend>, state: &State, + delayed_approvals_timers: &mut DelayedApprovalTimer, ) -> SubsystemResult> { let all_blocks: Vec = db.load_all_blocks()?; @@ -1172,8 +1340,8 @@ fn distribution_messages_for_activation( slot: block_entry.slot(), session: block_entry.session(), }); - - for (i, (_, candidate_hash)) in block_entry.candidates().iter().enumerate() { + let mut signatures_queued = HashSet::new(); + for (_, candidate_hash) in block_entry.candidates() { let _candidate_span = distribution_message_span.child("candidate").with_candidate(*candidate_hash); let candidate_entry = match db.load_candidate_entry(&candidate_hash)? { @@ -1200,6 +1368,15 @@ fn distribution_messages_for_activation( &candidate_hash, &block_entry, ) { + if block_entry.has_candidates_pending_signature() { + delayed_approvals_timers.maybe_arm_timer( + state.clock.tick_now(), + state.clock.as_ref(), + block_entry.block_hash(), + assignment.validator_index(), + ) + } + match cores_to_candidate_indices( &claimed_core_indices, &block_entry, @@ -1267,15 +1444,19 @@ fn distribution_messages_for_activation( continue }, } - - messages.push(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: i as _, - validator: assignment.validator_index(), - signature: approval_sig, - }, - )); + if signatures_queued + .insert(approval_sig.signed_candidates_indices.clone()) + { + messages.push(ApprovalDistributionMessage::DistributeApproval( + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: approval_sig + .signed_candidates_indices, + validator: assignment.validator_index(), + signature: approval_sig.signature, + }, + )) + }; } else { gum::warn!( target: LOG_TARGET, @@ -1481,7 +1662,7 @@ async fn get_approval_signatures_for_candidate( ctx: &mut Context, db: &OverlayedBackend<'_, impl Backend>, candidate_hash: CandidateHash, - tx: oneshot::Sender>, + tx: oneshot::Sender, ValidatorSignature)>>, ) -> SubsystemResult<()> { let send_votes = |votes| { if let Err(_) = tx.send(votes) { @@ -1507,6 +1688,11 @@ async fn get_approval_signatures_for_candidate( let relay_hashes = entry.block_assignments.keys(); let mut candidate_indices = HashSet::new(); + let mut candidate_indices_to_candidate_hashes: HashMap< + Hash, + HashMap, + > = HashMap::new(); + // Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution: for hash in relay_hashes { let entry = match db.load_block_entry(hash)? 
{ @@ -1524,8 +1710,11 @@ async fn get_approval_signatures_for_candidate( for (candidate_index, (_core_index, c_hash)) in entry.candidates().iter().enumerate() { if c_hash == &candidate_hash { candidate_indices.insert((*hash, candidate_index as u32)); - break } + candidate_indices_to_candidate_hashes + .entry(*hash) + .or_default() + .insert(candidate_index as _, *c_hash); } } @@ -1550,7 +1739,55 @@ async fn get_approval_signatures_for_candidate( target: LOG_TARGET, "Request for approval signatures got cancelled by `approval-distribution`." ), - Some(Ok(votes)) => send_votes(votes), + Some(Ok(votes)) => { + let votes = votes + .into_iter() + .filter_map(|(validator_index, (hash, signed_candidates_indices, signature))| { + let candidates_hashes = candidate_indices_to_candidate_hashes.get(&hash); + + if candidates_hashes.is_none() { + gum::warn!( + target: LOG_TARGET, + ?hash, + "Possible bug! Could not find map of candidate_hashes for block hash received from approval-distribution" + ); + } + + let num_signed_candidates = signed_candidates_indices.len(); + + let signed_candidates_hashes: Vec = + signed_candidates_indices + .into_iter() + .filter_map(|candidate_index| { + candidates_hashes.and_then(|candidate_hashes| { + if let Some(candidate_hash) = + candidate_hashes.get(&candidate_index) + { + Some(*candidate_hash) + } else { + gum::warn!( + target: LOG_TARGET, + ?candidate_index, + "Possible bug! Could not find candidate hash for candidate_index coming from approval-distribution" + ); + None + } + }) + }) + .collect(); + if num_signed_candidates == signed_candidates_hashes.len() { + Some((validator_index, (signed_candidates_hashes, signature))) + } else { + gum::warn!( + target: LOG_TARGET, + "Possible bug! Could not find all hashes for candidates coming from approval-distribution" + ); + None + } + }) + .collect(); + send_votes(votes) + }, } }; @@ -2184,7 +2421,7 @@ async fn check_and_import_approval( db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, - approval: IndirectSignedApprovalVote, + approval: IndirectSignedApprovalVoteV2, with_response: impl FnOnce(ApprovalCheckResult) -> T, ) -> SubsystemResult<(Vec, T)> where @@ -2196,13 +2433,12 @@ where return Ok((Vec::new(), t)) }}; } - let mut span = state .spans .get(&approval.block_hash) .map(|span| span.child("check-and-import-approval")) .unwrap_or_else(|| jaeger::Span::new(approval.block_hash, "check-and-import-approval")) - .with_uint_tag("candidate-index", approval.candidate_index as u64) + .with_string_fmt_debug_tag("candidate-index", approval.candidate_indices.clone()) .with_relay_parent(approval.block_hash) .with_stage(jaeger::Stage::ApprovalChecking); @@ -2215,105 +2451,163 @@ where }, }; - let session_info = match get_session_info( - session_info_provider, - sender, - approval.block_hash, - block_entry.session(), - ) - .await - { - Some(s) => s, - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownSessionIndex( - block_entry.session() - ),)) - }, - }; + let approved_candidates_info: Result, ApprovalCheckError> = + approval + .candidate_indices + .iter_ones() + .map(|candidate_index| { + block_entry + .candidate(candidate_index) + .ok_or(ApprovalCheckError::InvalidCandidateIndex(candidate_index as _)) + .map(|candidate| (candidate_index as _, candidate.1)) + }) + .collect(); - let approved_candidate_hash = match block_entry.candidate(approval.candidate_index as usize) { - Some((_, h)) => *h, - None => respond_early!(ApprovalCheckResult::Bad( - 
ApprovalCheckError::InvalidCandidateIndex(approval.candidate_index), - )), + let approved_candidates_info = match approved_candidates_info { + Ok(approved_candidates_info) => approved_candidates_info, + Err(err) => { + respond_early!(ApprovalCheckResult::Bad(err)) + }, }; - span.add_string_tag("candidate-hash", format!("{:?}", approved_candidate_hash)); + span.add_string_tag("candidate-hashes", format!("{:?}", approved_candidates_info)); span.add_string_tag( - "traceID", - format!("{:?}", hash_to_trace_identifier(approved_candidate_hash.0)), + "traceIDs", + format!( + "{:?}", + approved_candidates_info + .iter() + .map(|(_, approved_candidate_hash)| hash_to_trace_identifier( + approved_candidate_hash.0 + )) + .collect_vec() + ), ); - let pubkey = match session_info.validators.get(approval.validator) { - Some(k) => k, - None => respond_early!(ApprovalCheckResult::Bad( - ApprovalCheckError::InvalidValidatorIndex(approval.validator), - )), - }; + { + let session_info = match get_session_info( + session_info_provider, + sender, + approval.block_hash, + block_entry.session(), + ) + .await + { + Some(s) => s, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownSessionIndex( + block_entry.session() + ),)) + }, + }; - // Signature check: - match DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking).check_signature( - &pubkey, - approved_candidate_hash, - block_entry.session(), - &approval.signature, - ) { - Err(_) => respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidSignature( - approval.validator - ),)), - Ok(()) => {}, - }; + let pubkey = match session_info.validators.get(approval.validator) { + Some(k) => k, + None => respond_early!(ApprovalCheckResult::Bad( + ApprovalCheckError::InvalidValidatorIndex(approval.validator), + )), + }; - let candidate_entry = match db.load_candidate_entry(&approved_candidate_hash)? { - Some(c) => c, - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidCandidate( - approval.candidate_index, - approved_candidate_hash - ),)) - }, - }; + gum::trace!( + target: LOG_TARGET, + "Received approval for num_candidates {:}", + approval.candidate_indices.count_ones() + ); - // Don't accept approvals until assignment. - match candidate_entry.approval_entry(&approval.block_hash) { - None => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::Internal( - approval.block_hash, - approved_candidate_hash - ),)) - }, - Some(e) if !e.is_assigned(approval.validator) => { - respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::NoAssignment( - approval.validator - ),)) - }, - _ => {}, + let candidate_hashes: Vec = + approved_candidates_info.iter().map(|candidate| candidate.1).collect(); + // Signature check: + match DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone()), + ) + .check_signature( + &pubkey, + if let Some(candidate_hash) = candidate_hashes.first() { + *candidate_hash + } else { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidValidatorIndex( + approval.validator + ),)) + }, + block_entry.session(), + &approval.signature, + ) { + Err(_) => { + gum::error!( + target: LOG_TARGET, + "Error while checking signature {:}", + approval.candidate_indices.count_ones() + ); + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidSignature( + approval.validator + ),)) + }, + Ok(()) => {}, + }; } - // importing the approval can be heavy as it may trigger acceptance for a series of blocks. 
- let t = with_response(ApprovalCheckResult::Accepted); + let mut actions = Vec::new(); + for (approval_candidate_index, approved_candidate_hash) in approved_candidates_info { + let block_entry = match db.load_block_entry(&approval.block_hash)? { + Some(b) => b, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::UnknownBlock( + approval.block_hash + ),)) + }, + }; - gum::trace!( - target: LOG_TARGET, - validator_index = approval.validator.0, - validator = ?pubkey, - candidate_hash = ?approved_candidate_hash, - para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, - "Importing approval vote", - ); + let candidate_entry = match db.load_candidate_entry(&approved_candidate_hash)? { + Some(c) => c, + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidCandidate( + approval_candidate_index, + approved_candidate_hash + ),)) + }, + }; - let actions = advance_approval_state( - sender, - state, - db, - session_info_provider, - &metrics, - block_entry, - approved_candidate_hash, - candidate_entry, - ApprovalStateTransition::RemoteApproval(approval.validator), - ) - .await; + // Don't accept approvals until assignment. + match candidate_entry.approval_entry(&approval.block_hash) { + None => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::Internal( + approval.block_hash, + approved_candidate_hash + ),)) + }, + Some(e) if !e.is_assigned(approval.validator) => { + respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::NoAssignment( + approval.validator + ),)) + }, + _ => {}, + } + + gum::debug!( + target: LOG_TARGET, + validator_index = approval.validator.0, + candidate_hash = ?approved_candidate_hash, + para_id = ?candidate_entry.candidate_receipt().descriptor.para_id, + "Importing approval vote", + ); + + let new_actions = advance_approval_state( + sender, + state, + db, + session_info_provider, + &metrics, + block_entry, + approved_candidate_hash, + candidate_entry, + ApprovalStateTransition::RemoteApproval(approval.validator), + ) + .await; + actions.extend(new_actions); + } + + // importing the approval can be heavy as it may trigger acceptance for a series of blocks. + let t = with_response(ApprovalCheckResult::Accepted); Ok((actions, t)) } @@ -2321,7 +2615,7 @@ where #[derive(Debug)] enum ApprovalStateTransition { RemoteApproval(ValidatorIndex), - LocalApproval(ValidatorIndex, ValidatorSignature), + LocalApproval(ValidatorIndex), WakeupProcessed, } @@ -2329,7 +2623,7 @@ impl ApprovalStateTransition { fn validator_index(&self) -> Option { match *self { ApprovalStateTransition::RemoteApproval(v) | - ApprovalStateTransition::LocalApproval(v, _) => Some(v), + ApprovalStateTransition::LocalApproval(v) => Some(v), ApprovalStateTransition::WakeupProcessed => None, } } @@ -2337,7 +2631,7 @@ impl ApprovalStateTransition { fn is_local_approval(&self) -> bool { match *self { ApprovalStateTransition::RemoteApproval(_) => false, - ApprovalStateTransition::LocalApproval(_, _) => true, + ApprovalStateTransition::LocalApproval(_) => true, ApprovalStateTransition::WakeupProcessed => false, } } @@ -2404,7 +2698,16 @@ where // assignment tick of `now - APPROVAL_DELAY` - that is, that // all counted assignments are at least `APPROVAL_DELAY` ticks old. 
let is_approved = check.is_approved(tick_now.saturating_sub(APPROVAL_DELAY)); - + if status.last_no_shows != 0 { + metrics.on_observed_no_shows(status.last_no_shows); + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + last_no_shows = ?status.last_no_shows, + "Observed no_shows", + ); + } if is_approved { gum::trace!( target: LOG_TARGET, @@ -2422,6 +2725,12 @@ where if no_shows != 0 { metrics.on_no_shows(no_shows); } + if check == Check::ApprovedOneThird { + // No-shows are not counted when more than one third of validators approve a + // candidate, so count candidates where more than one third of validators had to + // approve it, this is indicative of something breaking. + metrics.on_approved_by_one_third() + } metrics.on_candidate_approved(status.tranche_now as _); @@ -2430,6 +2739,10 @@ where actions.push(Action::NoteApprovedInChainSelection(block_hash)); } + db.write_block_entry(block_entry.into()); + } else if transition.is_local_approval() { + // Local approvals always update the block_entry, so we need to flush it to + // the database. db.write_block_entry(block_entry.into()); } @@ -2458,10 +2771,6 @@ where approval_entry.mark_approved(); } - if let ApprovalStateTransition::LocalApproval(_, ref sig) = transition { - approval_entry.import_approval_sig(sig.clone()); - } - actions.extend(schedule_wakeup_action( &approval_entry, block_hash, @@ -2599,7 +2908,7 @@ async fn process_wakeup( let should_trigger = should_trigger_assignment( &approval_entry, &candidate_entry, - tranches_to_approve, + tranches_to_approve.required_tranches, tranche_now, ); @@ -2924,11 +3233,12 @@ async fn launch_approval( #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] async fn issue_approval( ctx: &mut Context, - state: &State, + state: &mut State, db: &mut OverlayedBackend<'_, impl Backend>, session_info_provider: &mut RuntimeInfo, metrics: &Metrics, candidate_hash: CandidateHash, + delayed_approvals_timers: &mut DelayedApprovalTimer, ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, ) -> SubsystemResult> { let mut issue_approval_span = state @@ -2942,7 +3252,7 @@ async fn issue_approval( .with_validator_index(validator_index) .with_stage(jaeger::Stage::ApprovalChecking); - let block_entry = match db.load_block_entry(&block_hash)? { + let mut block_entry = match db.load_block_entry(&block_hash)? { Some(b) => b, None => { // not a cause for alarm - just lost a race with pruning, most likely. 
@@ -2968,21 +3278,6 @@ async fn issue_approval( }; issue_approval_span.add_int_tag("candidate_index", candidate_index as i64); - let session_info = match get_session_info( - session_info_provider, - ctx.sender(), - block_entry.parent_hash(), - block_entry.session(), - ) - .await - { - Some(s) => s, - None => { - metrics.on_approval_error(); - return Ok(Vec::new()) - }, - }; - let candidate_hash = match block_entry.candidate(candidate_index as usize) { Some((_, h)) => *h, None => { @@ -3013,10 +3308,149 @@ async fn issue_approval( }, }; + let session_info = match get_session_info( + session_info_provider, + ctx.sender(), + block_entry.parent_hash(), + block_entry.session(), + ) + .await + { + Some(s) => s, + None => return Ok(Vec::new()), + }; + + if block_entry + .defer_candidate_signature( + candidate_index as _, + candidate_hash, + compute_delayed_approval_sending_tick( + state, + &block_entry, + &candidate_entry, + session_info, + &metrics, + ), + ) + .is_some() + { + gum::error!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + validator_index = validator_index.0, + "Possible bug, we shouldn't have to defer a candidate more than once", + ); + } + + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + ?block_hash, + validator_index = validator_index.0, + "Ready to issue approval vote", + ); + + let actions = advance_approval_state( + ctx.sender(), + state, + db, + session_info_provider, + metrics, + block_entry, + candidate_hash, + candidate_entry, + ApprovalStateTransition::LocalApproval(validator_index as _), + ) + .await; + + if let Some(next_wakeup) = maybe_create_signature( + db, + session_info_provider, + state, + ctx, + block_hash, + validator_index, + metrics, + ) + .await? + { + delayed_approvals_timers.maybe_arm_timer( + next_wakeup, + state.clock.as_ref(), + block_hash, + validator_index, + ); + } + Ok(actions) +} + +// Create signature for the approved candidates pending signatures +#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)] +async fn maybe_create_signature( + db: &mut OverlayedBackend<'_, impl Backend>, + session_info_provider: &mut RuntimeInfo, + state: &State, + ctx: &mut Context, + block_hash: Hash, + validator_index: ValidatorIndex, + metrics: &Metrics, +) -> SubsystemResult> { + let mut block_entry = match db.load_block_entry(&block_hash)? { + Some(b) => b, + None => { + // not a cause for alarm - just lost a race with pruning, most likely. 
+ metrics.on_approval_stale(); + gum::debug!( + target: LOG_TARGET, + "Could not find block that needs signature {:}", block_hash + ); + return Ok(None) + }, + }; + + let approval_params = state + .get_approval_voting_params_or_default(ctx, block_entry.session(), block_hash) + .await + .unwrap_or_default(); + + gum::trace!( + target: LOG_TARGET, + "Candidates pending signatures {:}", block_entry.num_candidates_pending_signature() + ); + let tick_now = state.clock.tick_now(); + + let (candidates_to_sign, sign_no_later_then) = block_entry + .get_candidates_that_need_signature(tick_now, approval_params.max_approval_coalesce_count); + + let (candidates_hashes, candidates_indices) = match candidates_to_sign { + Some(candidates_to_sign) => candidates_to_sign, + None => return Ok(sign_no_later_then), + }; + + let session_info = match get_session_info( + session_info_provider, + ctx.sender(), + block_entry.parent_hash(), + block_entry.session(), + ) + .await + { + Some(s) => s, + None => { + metrics.on_approval_error(); + gum::error!( + target: LOG_TARGET, + "Could not retrieve the session" + ); + return Ok(None) + }, + }; + let validator_pubkey = match session_info.validators.get(validator_index) { Some(p) => p, None => { - gum::warn!( + gum::error!( target: LOG_TARGET, "Validator index {} out of bounds in session {}", validator_index.0, @@ -3024,72 +3458,89 @@ async fn issue_approval( ); metrics.on_approval_error(); - return Ok(Vec::new()) + return Ok(None) }, }; - let session = block_entry.session(); - let sig = match sign_approval(&state.keystore, &validator_pubkey, candidate_hash, session) { + let signature = match sign_approval( + &state.keystore, + &validator_pubkey, + &candidates_hashes, + block_entry.session(), + ) { Some(sig) => sig, None => { - gum::warn!( + gum::error!( target: LOG_TARGET, validator_index = ?validator_index, - session, + session = ?block_entry.session(), "Could not issue approval signature. Assignment key present but not validator key?", ); metrics.on_approval_error(); - return Ok(Vec::new()) + return Ok(None) }, }; + metrics.on_approval_coalesce(candidates_hashes.len() as u32); - gum::trace!( - target: LOG_TARGET, - ?candidate_hash, - ?block_hash, - validator_index = validator_index.0, - "Issuing approval vote", - ); + let candidate_entries = candidates_hashes + .iter() + .map(|candidate_hash| db.load_candidate_entry(candidate_hash)) + .collect::>>>()?; - let actions = advance_approval_state( - ctx.sender(), - state, - db, - session_info_provider, - metrics, - block_entry, - candidate_hash, - candidate_entry, - ApprovalStateTransition::LocalApproval(validator_index as _, sig.clone()), - ) - .await; + for mut candidate_entry in candidate_entries { + let approval_entry = candidate_entry.as_mut().and_then(|candidate_entry| { + candidate_entry.approval_entry_mut(&block_entry.block_hash()) + }); + + match approval_entry { + Some(approval_entry) => approval_entry.import_approval_sig(OurApproval { + signature: signature.clone(), + signed_candidates_indices: candidates_indices.clone(), + }), + None => { + gum::error!( + target: LOG_TARGET, + candidate_entry = ?candidate_entry, + "Candidate scheduled for signing approval entry should not be None" + ); + }, + }; + candidate_entry.map(|candidate_entry| db.write_candidate_entry(candidate_entry)); + } metrics.on_approval_produced(); - // dispatch to approval distribution. 
ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeApproval( - IndirectSignedApprovalVote { - block_hash, - candidate_index: candidate_index as _, + IndirectSignedApprovalVoteV2 { + block_hash: block_entry.block_hash(), + candidate_indices: candidates_indices, validator: validator_index, - signature: sig, + signature, }, )); - Ok(actions) + gum::trace!( + target: LOG_TARGET, + ?block_hash, + signed_candidates = ?block_entry.num_candidates_pending_signature(), + "Issue approval votes", + ); + block_entry.issued_approval(); + db.write_block_entry(block_entry.into()); + Ok(None) } // Sign an approval vote. Fails if the key isn't present in the store. fn sign_approval( keystore: &LocalKeystore, public: &ValidatorId, - candidate_hash: CandidateHash, + candidate_hashes: &[CandidateHash], session_index: SessionIndex, ) -> Option { let key = keystore.key_pair::(public).ok().flatten()?; - let payload = ApprovalVote(candidate_hash).signing_payload(session_index); + let payload = ApprovalVoteMultipleCandidates(candidate_hashes).signing_payload(session_index); Some(key.sign(&payload[..])) } @@ -3119,3 +3570,38 @@ fn issue_local_invalid_statement( false, )); } + +// Computes what is the latest tick we can send an approval +fn compute_delayed_approval_sending_tick( + state: &State, + block_entry: &BlockEntry, + candidate_entry: &CandidateEntry, + session_info: &SessionInfo, + metrics: &Metrics, +) -> Tick { + let current_block_tick = slot_number_to_tick(state.slot_duration_millis, block_entry.slot()); + let assignment_tranche = candidate_entry + .approval_entry(&block_entry.block_hash()) + .and_then(|approval_entry| approval_entry.our_assignment()) + .map(|our_assignment| our_assignment.tranche()) + .unwrap_or_default(); + + let assignment_triggered_tick = current_block_tick + assignment_tranche as Tick; + + let no_show_duration_ticks = slot_number_to_tick( + state.slot_duration_millis, + Slot::from(u64::from(session_info.no_show_slots)), + ); + let tick_now = state.clock.tick_now(); + + let sign_no_later_than = min( + tick_now + MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick, + // We don't want to accidentally cause no-shows, so if we are past + // the second half of the no show time, force the sending of the + // approval immediately. + assignment_triggered_tick + no_show_duration_ticks / 2, + ); + + metrics.on_delayed_approval(sign_no_later_than.checked_sub(tick_now).unwrap_or_default()); + sign_no_later_than +} diff --git a/polkadot/node/core/approval-voting/src/ops.rs b/polkadot/node/core/approval-voting/src/ops.rs index a6f0ecf9d1f0..2a8fdba5aa36 100644 --- a/polkadot/node/core/approval-voting/src/ops.rs +++ b/polkadot/node/core/approval-voting/src/ops.rs @@ -25,7 +25,7 @@ use polkadot_primitives::{BlockNumber, CandidateHash, CandidateReceipt, GroupInd use std::collections::{hash_map::Entry, BTreeMap, HashMap}; use super::{ - approval_db::v2::{OurAssignment, StoredBlockRange}, + approval_db::{common::StoredBlockRange, v2::OurAssignment}, backend::{Backend, OverlayedBackend}, persisted_entries::{ApprovalEntry, BlockEntry, CandidateEntry}, LOG_TARGET, diff --git a/polkadot/node/core/approval-voting/src/persisted_entries.rs b/polkadot/node/core/approval-voting/src/persisted_entries.rs index 9cfe1c4cf8da..ef47bdb2213a 100644 --- a/polkadot/node/core/approval-voting/src/persisted_entries.rs +++ b/polkadot/node/core/approval-voting/src/persisted_entries.rs @@ -20,13 +20,14 @@ //! Within that context, things are plain-old-data. Within this module, //! data and logic are intertwined. 
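Note on the coalesced signing path above: `sign_approval` now signs an `ApprovalVoteMultipleCandidates` payload over all queued candidate hashes, and `compute_delayed_approval_sending_tick` bounds how long a vote may be held back, taking the earlier of a fixed coalescing window and half of the no-show window counted from the tick at which our assignment triggered. The sketch below restates only that deadline rule in isolation; it is a simplification, and the constant is a placeholder, not the subsystem's actual `MAX_APPROVAL_COALESCE_WAIT_TICKS` value (which is not visible in this diff).

// Standalone sketch of the "sign no later than" deadline used when coalescing
// approvals. Tick values are plain half-second ticks; the constant below is a
// placeholder, not the value used by the subsystem.
type Tick = u64;

const MAX_APPROVAL_COALESCE_WAIT_TICKS_SKETCH: Tick = 12; // placeholder

fn delayed_sending_deadline(
    tick_now: Tick,
    assignment_triggered_tick: Tick,
    no_show_duration_ticks: Tick,
) -> Tick {
    // Wait a bounded number of ticks for more candidates to coalesce, but never
    // past the second half of the no-show window of our own assignment.
    std::cmp::min(
        tick_now + MAX_APPROVAL_COALESCE_WAIT_TICKS_SKETCH,
        assignment_triggered_tick + no_show_duration_ticks / 2,
    )
}

fn main() {
    // Assignment triggered at tick 100, no-show window of 20 ticks, current tick 104:
    // we may wait until tick 110 (half the no-show window), not 116.
    assert_eq!(delayed_sending_deadline(104, 100, 20), 110);
    // If the coalescing cap is the tighter bound, it wins instead.
    assert_eq!(delayed_sending_deadline(100, 100, 40), 112);
    println!("deadline sketch ok");
}

Capping at half of the no-show window is what keeps the extra waiting from turning the local validator into a no-show, matching the comment in the hunk above.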
+use itertools::Itertools; use polkadot_node_primitives::approval::{ v1::{DelayTranche, RelayVRFStory}, v2::{AssignmentCertV2, CandidateBitfield}, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, CoreIndex, GroupIndex, Hash, SessionIndex, - ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, CoreIndex, GroupIndex, Hash, + SessionIndex, ValidatorIndex, ValidatorSignature, }; use sp_consensus_slots::Slot; @@ -76,6 +77,45 @@ impl From for crate::approval_db::v2::TrancheEntry { } } +impl From for OurApproval { + fn from(approval: crate::approval_db::v3::OurApproval) -> Self { + Self { + signature: approval.signature, + signed_candidates_indices: approval.signed_candidates_indices, + } + } +} +impl From for crate::approval_db::v3::OurApproval { + fn from(approval: OurApproval) -> Self { + Self { + signature: approval.signature, + signed_candidates_indices: approval.signed_candidates_indices, + } + } +} + +/// Metadata about our approval signature +#[derive(Debug, Clone, PartialEq)] +pub struct OurApproval { + /// The signature for the candidates hashes pointed by indices. + pub signature: ValidatorSignature, + /// The indices of the candidates signed in this approval. + pub signed_candidates_indices: CandidateBitfield, +} + +impl OurApproval { + /// Converts a ValidatorSignature to an OurApproval. + /// It used in converting the database from v1 to latest. + pub fn from_v1(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self { + Self { signature: value, signed_candidates_indices: candidate_index.into() } + } + + /// Converts a ValidatorSignature to an OurApproval. + /// It used in converting the database from v2 to latest. + pub fn from_v2(value: ValidatorSignature, candidate_index: CandidateIndex) -> Self { + Self::from_v1(value, candidate_index) + } +} /// Metadata regarding approval of a particular candidate within the context of some /// particular block. #[derive(Debug, Clone, PartialEq)] @@ -83,7 +123,7 @@ pub struct ApprovalEntry { tranches: Vec, backing_group: GroupIndex, our_assignment: Option, - our_approval_sig: Option, + our_approval_sig: Option, // `n_validators` bits. assigned_validators: Bitfield, approved: bool, @@ -95,7 +135,7 @@ impl ApprovalEntry { tranches: Vec, backing_group: GroupIndex, our_assignment: Option, - our_approval_sig: Option, + our_approval_sig: Option, // `n_validators` bits. assigned_validators: Bitfield, approved: bool, @@ -137,7 +177,7 @@ impl ApprovalEntry { } /// Import our local approval vote signature for this candidate. - pub fn import_approval_sig(&mut self, approval_sig: ValidatorSignature) { + pub fn import_approval_sig(&mut self, approval_sig: OurApproval) { self.our_approval_sig = Some(approval_sig); } @@ -224,7 +264,7 @@ impl ApprovalEntry { /// Get the assignment cert & approval signature. /// /// The approval signature will only be `Some` if the assignment is too. 
- pub fn local_statements(&self) -> (Option, Option) { + pub fn local_statements(&self) -> (Option, Option) { let approval_sig = self.our_approval_sig.clone(); if let Some(our_assignment) = self.our_assignment.as_ref().filter(|a| a.triggered()) { (Some(our_assignment.clone()), approval_sig) @@ -232,10 +272,44 @@ impl ApprovalEntry { (None, None) } } + + // Convert an ApprovalEntry from v1 version to latest version + pub fn from_v1( + value: crate::approval_db::v1::ApprovalEntry, + candidate_index: CandidateIndex, + ) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value + .our_approval_sig + .map(|sig| OurApproval::from_v1(sig, candidate_index)), + assigned_validators: value.assignments, + approved: value.approved, + } + } + + // Convert an ApprovalEntry from v1 version to latest version + pub fn from_v2( + value: crate::approval_db::v2::ApprovalEntry, + candidate_index: CandidateIndex, + ) -> Self { + ApprovalEntry { + tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), + backing_group: value.backing_group, + our_assignment: value.our_assignment.map(|assignment| assignment.into()), + our_approval_sig: value + .our_approval_sig + .map(|sig| OurApproval::from_v2(sig, candidate_index)), + assigned_validators: value.assigned_validators, + approved: value.approved, + } + } } -impl From for ApprovalEntry { - fn from(entry: crate::approval_db::v2::ApprovalEntry) -> Self { +impl From for ApprovalEntry { + fn from(entry: crate::approval_db::v3::ApprovalEntry) -> Self { ApprovalEntry { tranches: entry.tranches.into_iter().map(Into::into).collect(), backing_group: entry.backing_group, @@ -247,7 +321,7 @@ impl From for ApprovalEntry { } } -impl From for crate::approval_db::v2::ApprovalEntry { +impl From for crate::approval_db::v3::ApprovalEntry { fn from(entry: ApprovalEntry) -> Self { Self { tranches: entry.tranches.into_iter().map(Into::into).collect(), @@ -303,10 +377,44 @@ impl CandidateEntry { pub fn approval_entry(&self, block_hash: &Hash) -> Option<&ApprovalEntry> { self.block_assignments.get(block_hash) } + + /// Convert a CandidateEntry from a v1 to its latest equivalent. + pub fn from_v1( + value: crate::approval_db::v1::CandidateEntry, + candidate_index: CandidateIndex, + ) -> Self { + Self { + approvals: value.approvals, + block_assignments: value + .block_assignments + .into_iter() + .map(|(h, ae)| (h, ApprovalEntry::from_v1(ae, candidate_index))) + .collect(), + candidate: value.candidate, + session: value.session, + } + } + + /// Convert a CandidateEntry from a v2 to its latest equivalent. 
+ pub fn from_v2( + value: crate::approval_db::v2::CandidateEntry, + candidate_index: CandidateIndex, + ) -> Self { + Self { + approvals: value.approvals, + block_assignments: value + .block_assignments + .into_iter() + .map(|(h, ae)| (h, ApprovalEntry::from_v2(ae, candidate_index))) + .collect(), + candidate: value.candidate, + session: value.session, + } + } } -impl From for CandidateEntry { - fn from(entry: crate::approval_db::v2::CandidateEntry) -> Self { +impl From for CandidateEntry { + fn from(entry: crate::approval_db::v3::CandidateEntry) -> Self { CandidateEntry { candidate: entry.candidate, session: entry.session, @@ -320,7 +428,7 @@ impl From for CandidateEntry { } } -impl From for crate::approval_db::v2::CandidateEntry { +impl From for crate::approval_db::v3::CandidateEntry { fn from(entry: CandidateEntry) -> Self { Self { candidate: entry.candidate, @@ -353,12 +461,21 @@ pub struct BlockEntry { // block. The block can be considered approved if the bitfield has all bits set to `true`. pub approved_bitfield: Bitfield, pub children: Vec, + // A list of candidates we have checked, but didn't not sign and + // advertise the vote yet. + candidates_pending_signature: BTreeMap, // A list of assignments for which we already distributed the assignment. // We use this to ensure we don't distribute multiple core assignments twice as we track // individual wakeups for each core. distributed_assignments: Bitfield, } +#[derive(Debug, Clone, PartialEq)] +pub struct CandidateSigningContext { + pub candidate_hash: CandidateHash, + pub sign_no_later_than_tick: Tick, +} + impl BlockEntry { /// Mark a candidate as fully approved in the bitfield. pub fn mark_approved_by_hash(&mut self, candidate_hash: &CandidateHash) { @@ -447,10 +564,97 @@ impl BlockEntry { distributed } + + /// Defer signing and issuing an approval for a candidate no later than the specified tick + pub fn defer_candidate_signature( + &mut self, + candidate_index: CandidateIndex, + candidate_hash: CandidateHash, + sign_no_later_than_tick: Tick, + ) -> Option { + self.candidates_pending_signature.insert( + candidate_index, + CandidateSigningContext { candidate_hash, sign_no_later_than_tick }, + ) + } + + /// Returns the number of candidates waiting for an approval to be issued. + pub fn num_candidates_pending_signature(&self) -> usize { + self.candidates_pending_signature.len() + } + + /// Return if we have candidates waiting for signature to be issued + pub fn has_candidates_pending_signature(&self) -> bool { + !self.candidates_pending_signature.is_empty() + } + + /// Candidate hashes for candidates pending signatures + fn candidate_hashes_pending_signature(&self) -> Vec { + self.candidates_pending_signature + .values() + .map(|unsigned_approval| unsigned_approval.candidate_hash) + .collect() + } + + /// Candidate indices for candidates pending signature + fn candidate_indices_pending_signature(&self) -> Option { + self.candidates_pending_signature + .keys() + .map(|val| *val) + .collect_vec() + .try_into() + .ok() + } + + /// Returns a list of candidates hashes that need need signature created at the current tick: + /// This might happen in other of the two reasons: + /// 1. We queued more than max_approval_coalesce_count candidates. + /// 2. We have candidates that waiting in the queue past their `sign_no_later_than_tick` + /// + /// Additionally, we also return the first tick when we will have to create a signature, + /// so that the caller can arm the timer if it is not already armed. 
+ pub fn get_candidates_that_need_signature( + &self, + tick_now: Tick, + max_approval_coalesce_count: u32, + ) -> (Option<(Vec, CandidateBitfield)>, Option) { + let sign_no_later_than_tick = self + .candidates_pending_signature + .values() + .min_by(|a, b| a.sign_no_later_than_tick.cmp(&b.sign_no_later_than_tick)) + .map(|val| val.sign_no_later_than_tick); + + if let Some(sign_no_later_than_tick) = sign_no_later_than_tick { + if sign_no_later_than_tick <= tick_now || + self.num_candidates_pending_signature() >= max_approval_coalesce_count as usize + { + ( + self.candidate_indices_pending_signature().and_then(|candidate_indices| { + Some((self.candidate_hashes_pending_signature(), candidate_indices)) + }), + Some(sign_no_later_than_tick), + ) + } else { + // We can still wait for other candidates to queue in, so just make sure + // we wake up at the tick we have to sign the longest waiting candidate. + (Default::default(), Some(sign_no_later_than_tick)) + } + } else { + // No cached candidates, nothing to do here, this just means the timer fired, + // but the signatures were already sent because we gathered more than + // max_approval_coalesce_count. + (Default::default(), sign_no_later_than_tick) + } + } + + /// Clears the candidates pending signature because the approval was issued. + pub fn issued_approval(&mut self) { + self.candidates_pending_signature.clear(); + } } -impl From for BlockEntry { - fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { +impl From for BlockEntry { + fn from(entry: crate::approval_db::v3::BlockEntry) -> Self { BlockEntry { block_hash: entry.block_hash, parent_hash: entry.parent_hash, @@ -461,6 +665,11 @@ impl From for BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + candidates_pending_signature: entry + .candidates_pending_signature + .into_iter() + .map(|(candidate_index, signing_context)| (candidate_index, signing_context.into())) + .collect(), distributed_assignments: entry.distributed_assignments, } } @@ -479,11 +688,30 @@ impl From for BlockEntry { approved_bitfield: entry.approved_bitfield, children: entry.children, distributed_assignments: Default::default(), + candidates_pending_signature: Default::default(), } } } -impl From for crate::approval_db::v2::BlockEntry { +impl From for BlockEntry { + fn from(entry: crate::approval_db::v2::BlockEntry) -> Self { + BlockEntry { + block_hash: entry.block_hash, + parent_hash: entry.parent_hash, + block_number: entry.block_number, + session: entry.session, + slot: entry.slot, + relay_vrf_story: RelayVRFStory(entry.relay_vrf_story), + candidates: entry.candidates, + approved_bitfield: entry.approved_bitfield, + children: entry.children, + distributed_assignments: entry.distributed_assignments, + candidates_pending_signature: Default::default(), + } + } +} + +impl From for crate::approval_db::v3::BlockEntry { fn from(entry: BlockEntry) -> Self { Self { block_hash: entry.block_hash, @@ -495,36 +723,30 @@ impl From for crate::approval_db::v2::BlockEntry { candidates: entry.candidates, approved_bitfield: entry.approved_bitfield, children: entry.children, + candidates_pending_signature: entry + .candidates_pending_signature + .into_iter() + .map(|(candidate_index, signing_context)| (candidate_index, signing_context.into())) + .collect(), distributed_assignments: entry.distributed_assignments, } } } -/// Migration helpers. 
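The `get_candidates_that_need_signature` addition above reduces to a small scheduling rule: sign every queued candidate once the earliest `sign_no_later_than_tick` has been reached or the queue length hits `max_approval_coalesce_count`, and otherwise just report the earliest deadline so the caller can arm a wakeup. A standalone restatement of that rule, with a plain `BTreeMap` standing in for the `CandidateSigningContext` map and candidate hashes and bitfields left out:

use std::collections::BTreeMap;

type Tick = u64;
type CandidateIndex = u32;

/// Simplified pending-signature queue: candidate index -> deadline tick.
struct PendingSignatures(BTreeMap<CandidateIndex, Tick>);

impl PendingSignatures {
    /// Sign everything now if the earliest deadline has been reached or the
    /// queue hit the coalesce limit; otherwise report the tick the caller
    /// should arm a wakeup for.
    fn candidates_to_sign(
        &self,
        tick_now: Tick,
        max_coalesce: usize,
    ) -> (Option<Vec<CandidateIndex>>, Option<Tick>) {
        let earliest_deadline = self.0.values().copied().min();
        match earliest_deadline {
            Some(deadline) if deadline <= tick_now || self.0.len() >= max_coalesce =>
                (Some(self.0.keys().copied().collect()), Some(deadline)),
            Some(deadline) => (None, Some(deadline)),
            None => (None, None),
        }
    }
}

fn main() {
    let mut queue = PendingSignatures(BTreeMap::new());
    queue.0.insert(0, 110);
    queue.0.insert(1, 115);

    // Too few candidates and the earliest deadline (110) is in the future:
    // wait, and arm the timer for tick 110.
    assert_eq!(queue.candidates_to_sign(105, 3), (None, Some(110)));
    // Deadline reached: sign both pending candidates.
    assert_eq!(queue.candidates_to_sign(110, 3), (Some(vec![0, 1]), Some(110)));
    // Queue reached the coalesce limit: sign immediately as well.
    assert_eq!(queue.candidates_to_sign(105, 2), (Some(vec![0, 1]), Some(110)));
}

The real method also returns the candidate hashes and a `CandidateBitfield` for the signature itself; the sketch keeps only the when-to-sign decision.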
-impl From for CandidateEntry { - fn from(value: crate::approval_db::v1::CandidateEntry) -> Self { +impl From for CandidateSigningContext { + fn from(signing_context: crate::approval_db::v3::CandidateSigningContext) -> Self { Self { - approvals: value.approvals, - block_assignments: value - .block_assignments - .into_iter() - .map(|(h, ae)| (h, ae.into())) - .collect(), - candidate: value.candidate, - session: value.session, + candidate_hash: signing_context.candidate_hash, + sign_no_later_than_tick: signing_context.sign_no_later_than_tick.into(), } } } -impl From for ApprovalEntry { - fn from(value: crate::approval_db::v1::ApprovalEntry) -> Self { - ApprovalEntry { - tranches: value.tranches.into_iter().map(|tranche| tranche.into()).collect(), - backing_group: value.backing_group, - our_assignment: value.our_assignment.map(|assignment| assignment.into()), - our_approval_sig: value.our_approval_sig, - assigned_validators: value.assignments, - approved: value.approved, +impl From for crate::approval_db::v3::CandidateSigningContext { + fn from(signing_context: CandidateSigningContext) -> Self { + Self { + candidate_hash: signing_context.candidate_hash, + sign_no_later_than_tick: signing_context.sign_no_later_than_tick.into(), } } } diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index cdfc170cd2ba..498cf62bb30e 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -37,8 +37,8 @@ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_overseer::HeadSupportsParachains; use polkadot_primitives::{ - vstaging::NodeFeatures, CandidateCommitments, CandidateEvent, CoreIndex, GroupIndex, Header, - Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, + vstaging::NodeFeatures, ApprovalVote, CandidateCommitments, CandidateEvent, CoreIndex, + GroupIndex, Header, Id as ParaId, IndexedVec, ValidationCode, ValidatorSignature, }; use std::time::Duration; @@ -56,7 +56,7 @@ use std::{ }; use super::{ - approval_db::v2::StoredBlockRange, + approval_db::common::StoredBlockRange, backend::BackendWriteOp, import::tests::{ garbage_vrf_signature, AllowedSlots, BabeEpoch, BabeEpochConfiguration, @@ -116,7 +116,7 @@ fn make_sync_oracle(val: bool) -> (Box, TestSyncOracleHan #[cfg(test)] pub mod test_constants { - use crate::approval_db::v2::Config as DatabaseConfig; + use crate::approval_db::common::Config as DatabaseConfig; const DATA_COL: u32 = 0; pub(crate) const NUM_COLUMNS: u32 = 1; @@ -281,6 +281,7 @@ impl V1ReadBackend for TestStoreInner { fn load_candidate_entry_v1( &self, candidate_hash: &CandidateHash, + _candidate_index: CandidateIndex, ) -> SubsystemResult> { self.load_candidate_entry(candidate_hash) } @@ -364,6 +365,7 @@ impl V1ReadBackend for TestStore { fn load_candidate_entry_v1( &self, candidate_hash: &CandidateHash, + _candidate_index: CandidateIndex, ) -> SubsystemResult> { self.load_candidate_entry(candidate_hash) } @@ -446,6 +448,15 @@ fn sign_approval( key.sign(&ApprovalVote(candidate_hash).signing_payload(session_index)).into() } +fn sign_approval_multiple_candidates( + key: Sr25519Keyring, + candidate_hashes: Vec, + session_index: SessionIndex, +) -> ValidatorSignature { + key.sign(&ApprovalVoteMultipleCandidates(&candidate_hashes).signing_payload(session_index)) + .into() +} + type VirtualOverseer = test_helpers::TestSubsystemContextHandle; #[derive(Default)] @@ -641,7 +652,12 @@ async fn 
check_and_import_approval( overseer, FromOrchestra::Communication { msg: ApprovalVotingMessage::CheckAndImportApproval( - IndirectSignedApprovalVote { block_hash, candidate_index, validator, signature }, + IndirectSignedApprovalVoteV2 { + block_hash, + candidate_indices: candidate_index.into(), + validator, + signature, + }, tx, ), }, @@ -2014,6 +2030,91 @@ fn forkful_import_at_same_height_act_on_leaf() { }); } +#[test] +fn test_signing_a_single_candidate_is_backwards_compatible() { + let session_index = 1; + let block_hash = Hash::repeat_byte(0x01); + let candidate_descriptors = (1..10) + .into_iter() + .map(|val| make_candidate(ParaId::from(val as u32), &block_hash)) + .collect::>(); + + let candidate_hashes = candidate_descriptors + .iter() + .map(|candidate_descriptor| candidate_descriptor.hash()) + .collect_vec(); + + let first_descriptor = candidate_descriptors.first().unwrap(); + + let candidate_hash = first_descriptor.hash(); + + let sig_a = sign_approval(Sr25519Keyring::Alice, candidate_hash, session_index); + + let sig_b = sign_approval(Sr25519Keyring::Alice, candidate_hash, session_index); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_a, + ) + .is_ok()); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_b, + ) + .is_ok()); + + let sig_c = sign_approval_multiple_candidates( + Sr25519Keyring::Alice, + vec![candidate_hash], + session_index, + ); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(vec![candidate_hash]) + ) + .check_signature(&Sr25519Keyring::Alice.public().into(), candidate_hash, session_index, &sig_c,) + .is_ok()); + + assert!(DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + candidate_hash, + session_index, + &sig_c, + ) + .is_ok()); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(vec![candidate_hash]) + ) + .check_signature(&Sr25519Keyring::Alice.public().into(), candidate_hash, session_index, &sig_a,) + .is_ok()); + + let sig_all = sign_approval_multiple_candidates( + Sr25519Keyring::Alice, + candidate_hashes.clone(), + session_index, + ); + + assert!(DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone()) + ) + .check_signature( + &Sr25519Keyring::Alice.public().into(), + *candidate_hashes.first().expect("test"), + session_index, + &sig_all, + ) + .is_ok()); +} + #[test] fn import_checked_approval_updates_entries_and_schedules() { let config = HarnessConfig::default(); @@ -2730,11 +2831,29 @@ async fn handle_double_assignment_import( } ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = 
sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 1, + })); + } + ); + assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(_)) @@ -3469,3 +3588,455 @@ fn waits_until_approving_assignments_are_old_enough() { virtual_overseer }); } + +#[test] +fn test_approval_is_sent_on_max_approval_coalesce_count() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let assignments_cert = + garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = + test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_commitments = CandidateCommitments::default(); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let candidate_hash1 = candidate_receipt1.hash(); + + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let slot = Slot::from(1); + let candidate_index1 = 0; + let candidate_index2 = 1; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]); + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + }, + ) + .build(&mut virtual_overseer) + .await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + 
clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash1).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + handle_approval_on_max_coalesce_count( + &mut virtual_overseer, + vec![candidate_index1, candidate_index2], + ) + .await; + + virtual_overseer + }); +} + +async fn handle_approval_on_max_coalesce_count( + virtual_overseer: &mut VirtualOverseer, + candidate_indicies: Vec, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + } + ); + + for _ in &candidate_indicies { + recover_available_data(virtual_overseer).await; + fetch_validation_code(virtual_overseer).await; + } + + for _ in &candidate_indicies { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + .unwrap(); + } + ); + } + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 2, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { + assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); +} + +async fn handle_approval_on_max_wait_time( + virtual_overseer: &mut VirtualOverseer, + candidate_indicies: Vec, + clock: Box, +) { + const TICK_NOW_BEGIN: u64 = 1; + const MAX_COALESCE_COUNT: u32 = 3; + + clock.inner.lock().set_tick(TICK_NOW_BEGIN); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeAssignment( + _, + c_indices, + )) => { + assert_eq!(TryInto::::try_into(candidate_indicies.clone()).unwrap(), c_indices); + } + ); + + for _ in &candidate_indicies { + recover_available_data(virtual_overseer).await; + fetch_validation_code(virtual_overseer).await; + } + + for _ in &candidate_indicies { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive{exec_kind, response_sender, ..}) if exec_kind == PvfExecKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), 
Default::default()))) + .unwrap(); + } + ); + } + + // First time we fetch the configuration when we are ready to approve the first candidate + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: MAX_COALESCE_COUNT, + })); + } + ); + + // Second time we fetch the configuration when we are ready to approve the second candidate + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: MAX_COALESCE_COUNT, + })); + } + ); + + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + // Move the clock just before we should send the approval + clock + .inner + .lock() + .set_tick(MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick + TICK_NOW_BEGIN - 1); + + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); + + // Move the clock tick, so we can trigger a force sending of the approvals + clock + .inner + .lock() + .set_tick(MAX_APPROVAL_COALESCE_WAIT_TICKS as Tick + TICK_NOW_BEGIN); + + // Third time we fetch the configuration when timer expires and we are ready to sent the + // approval + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(_, RuntimeApiRequest::ApprovalVotingParams(_, sender))) => { + let _ = sender.send(Ok(ApprovalVotingParams { + max_approval_coalesce_count: 3, + })); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ApprovalDistribution(ApprovalDistributionMessage::DistributeApproval(vote)) => { + assert_eq!(TryInto::::try_into(candidate_indicies).unwrap(), vote.candidate_indices); + } + ); + + // Assert that there are no more messages being sent by the subsystem + assert!(overseer_recv(virtual_overseer).timeout(TIMEOUT / 2).await.is_none()); +} + +#[test] +fn test_approval_is_sent_on_max_approval_coalesce_wait() { + let assignment_criteria = Box::new(MockAssignmentCriteria( + || { + let mut assignments = HashMap::new(); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: garbage_assignment_cert(AssignmentCertKind::RelayVRFModulo { sample: 0 }) + .into(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let assignments_cert = + garbage_assignment_cert_v2(AssignmentCertKindV2::RelayVRFModuloCompact { + core_bitfield: vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)] + .try_into() + .unwrap(), + }); + let _ = assignments.insert( + CoreIndex(0), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + + let _ = assignments.insert( + CoreIndex(1), + approval_db::v2::OurAssignment { + cert: assignments_cert.clone(), + tranche: 0, + validator_index: ValidatorIndex(0), + triggered: false, + } + .into(), + ); + assignments + }, + |_| Ok(0), + )); + + let config = HarnessConfigBuilder::default().assignment_criteria(assignment_criteria).build(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { mut virtual_overseer, clock, sync_oracle_handle: _sync_oracle_handle } = + test_harness; + + assert_matches!( + overseer_recv(&mut 
virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + + let block_hash = Hash::repeat_byte(0x01); + + let candidate_commitments = CandidateCommitments::default(); + + let candidate_receipt1 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(1_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let candidate_hash1 = candidate_receipt1.hash(); + + let candidate_receipt2 = { + let mut receipt = dummy_candidate_receipt(block_hash); + receipt.descriptor.para_id = ParaId::from(2_u32); + receipt.commitments_hash = candidate_commitments.hash(); + receipt + }; + + let slot = Slot::from(1); + let candidate_index1 = 0; + let candidate_index2 = 1; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + let session_info = SessionInfo { + validator_groups: IndexedVec::>::from(vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ]), + ..session_info(&validators) + }; + + let candidates = Some(vec![ + (candidate_receipt1.clone(), CoreIndex(0), GroupIndex(0)), + (candidate_receipt2.clone(), CoreIndex(1), GroupIndex(1)), + ]); + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot, + candidates: candidates.clone(), + session_info: Some(session_info.clone()), + }, + ) + .build(&mut virtual_overseer) + .await; + + assert!(!clock.inner.lock().current_wakeup_is(1)); + clock.inner.lock().wakeup_all(1); + + assert!(clock.inner.lock().current_wakeup_is(slot_to_tick(slot))); + clock.inner.lock().wakeup_all(slot_to_tick(slot)); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + clock.inner.lock().wakeup_all(slot_to_tick(slot + 2)); + + assert_eq!(clock.inner.lock().wakeups.len(), 0); + + futures_timer::Delay::new(Duration::from_millis(200)).await; + + let candidate_entry = store.load_candidate_entry(&candidate_hash1).unwrap().unwrap(); + let our_assignment = + candidate_entry.approval_entry(&block_hash).unwrap().our_assignment().unwrap(); + assert!(our_assignment.triggered()); + + handle_approval_on_max_wait_time( + &mut virtual_overseer, + vec![candidate_index1, candidate_index2], + clock, + ) + .await; + + virtual_overseer + }); +} diff --git a/polkadot/node/core/approval-voting/src/time.rs b/polkadot/node/core/approval-voting/src/time.rs index a45866402c82..61091f3c34cd 100644 --- a/polkadot/node/core/approval-voting/src/time.rs +++ b/polkadot/node/core/approval-voting/src/time.rs @@ -16,14 +16,23 @@ //! Time utilities for approval voting. -use futures::prelude::*; +use futures::{ + future::BoxFuture, + prelude::*, + stream::{FusedStream, FuturesUnordered}, + Stream, StreamExt, +}; + use polkadot_node_primitives::approval::v1::DelayTranche; use sp_consensus_slots::Slot; use std::{ + collections::HashSet, pin::Pin, + task::Poll, time::{Duration, SystemTime}, }; +use polkadot_primitives::{Hash, ValidatorIndex}; const TICK_DURATION_MILLIS: u64 = 500; /// A base unit of time, starting from the Unix epoch, split into half-second intervals. 
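The hunk that follows adds `DelayedApprovalTimer`, a `FuturesUnordered`-backed stream that keeps at most one pending timer per relay-chain block and allows a block to be re-armed once its timer has fired. Setting the async plumbing aside, the bookkeeping it provides can be modelled with plain `std` types; the sketch below is a simplified illustration, not the subsystem's actual type:

use std::collections::HashSet;

type Tick = u64;
type BlockHash = [u8; 32];

/// Sketch of the per-block timer bookkeeping: at most one pending timer per
/// block hash, and a hash becomes armable again once its timer has fired.
#[derive(Default)]
struct DelayedTimers {
    pending: Vec<(Tick, BlockHash)>,
    armed: HashSet<BlockHash>,
}

impl DelayedTimers {
    /// Arms a timer for `block_hash` unless one is already pending.
    fn maybe_arm(&mut self, fire_at: Tick, block_hash: BlockHash) {
        if self.armed.insert(block_hash) {
            self.pending.push((fire_at, block_hash));
        }
    }

    /// Returns the hashes whose timers expired by `tick_now`, un-arming them
    /// so they can be scheduled again later.
    fn fire(&mut self, tick_now: Tick) -> Vec<BlockHash> {
        let (fired, still_pending): (Vec<_>, Vec<_>) =
            self.pending.drain(..).partition(|(at, _)| *at <= tick_now);
        self.pending = still_pending;
        fired.iter().for_each(|(_, hash)| {
            self.armed.remove(hash);
        });
        fired.into_iter().map(|(_, hash)| hash).collect()
    }
}

fn main() {
    let mut timers = DelayedTimers::default();
    let block = [1u8; 32];

    timers.maybe_arm(10, block);
    timers.maybe_arm(5, block); // ignored: a timer for this block is already pending
    assert!(timers.fire(9).is_empty());
    assert_eq!(timers.fire(10), vec![block]);

    // Once fired, the same block can be armed again.
    timers.maybe_arm(20, block);
    assert_eq!(timers.fire(20), vec![block]);
    println!("timer sketch ok");
}

The real type additionally carries a `ValidatorIndex` with each expiry and implements `Stream` and `FusedStream`, so expirations can be awaited in the subsystem's `select!` loop alongside other events.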
@@ -88,3 +97,157 @@ pub(crate) fn slot_number_to_tick(slot_duration_millis: u64, slot: Slot) -> Tick let ticks_per_slot = slot_duration_millis / TICK_DURATION_MILLIS; u64::from(slot) * ticks_per_slot } + +/// A list of delayed futures that gets triggered when the waiting time has expired and it is +/// time to sign the candidate. +/// We have a timer per relay-chain block. +#[derive(Default)] +pub struct DelayedApprovalTimer { + timers: FuturesUnordered>, + blocks: HashSet, +} + +impl DelayedApprovalTimer { + /// Starts a single timer per block hash + /// + /// Guarantees that if a timer already exits for the give block hash, + /// no additional timer is started. + pub(crate) fn maybe_arm_timer( + &mut self, + wait_untill: Tick, + clock: &dyn Clock, + block_hash: Hash, + validator_index: ValidatorIndex, + ) { + if self.blocks.insert(block_hash) { + let clock_wait = clock.wait(wait_untill); + self.timers.push(Box::pin(async move { + clock_wait.await; + (block_hash, validator_index) + })); + } + } +} + +impl Stream for DelayedApprovalTimer { + type Item = (Hash, ValidatorIndex); + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let poll_result = self.timers.poll_next_unpin(cx); + match poll_result { + Poll::Ready(Some(result)) => { + self.blocks.remove(&result.0); + Poll::Ready(Some(result)) + }, + _ => poll_result, + } + } +} + +impl FusedStream for DelayedApprovalTimer { + fn is_terminated(&self) -> bool { + self.timers.is_terminated() + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use futures::{executor::block_on, FutureExt, StreamExt}; + use futures_timer::Delay; + use polkadot_primitives::{Hash, ValidatorIndex}; + + use crate::time::{Clock, SystemClock}; + + use super::DelayedApprovalTimer; + + #[test] + fn test_select_empty_timer() { + block_on(async move { + let mut timer = DelayedApprovalTimer::default(); + + for _ in 1..10 { + let result = futures::select!( + _ = timer.select_next_some() => { + 0 + } + // Only this arm should fire + _ = Delay::new(Duration::from_millis(100)).fuse() => { + 1 + } + ); + + assert_eq!(result, 1); + } + }); + } + + #[test] + fn test_timer_functionality() { + block_on(async move { + let mut timer = DelayedApprovalTimer::default(); + let test_hashes = + vec![Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)]; + for (index, hash) in test_hashes.iter().enumerate() { + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + } + let timeout_hash = Hash::repeat_byte(0x02); + for i in 0..test_hashes.len() * 2 { + let result = futures::select!( + (hash, _) = timer.select_next_some() => { + hash + } + // Timers should fire only once, so for the rest of the iterations we should timeout through here. 
+ _ = Delay::new(Duration::from_secs(2)).fuse() => { + timeout_hash + } + ); + assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result); + } + + // Now check timer can be restarted if already fired + for (index, hash) in test_hashes.iter().enumerate() { + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + timer.maybe_arm_timer( + SystemClock.tick_now() + index as u64, + &SystemClock, + *hash, + ValidatorIndex::from(2), + ); + } + + for i in 0..test_hashes.len() * 2 { + let result = futures::select!( + (hash, _) = timer.select_next_some() => { + hash + } + // Timers should fire only once, so for the rest of the iterations we should timeout through here. + _ = Delay::new(Duration::from_secs(2)).fuse() => { + timeout_hash + } + ); + assert_eq!(test_hashes.get(i).cloned().unwrap_or(timeout_hash), result); + } + }); + } +} diff --git a/polkadot/node/core/dispute-coordinator/src/import.rs b/polkadot/node/core/dispute-coordinator/src/import.rs index 837ad7856e73..98c12bd509b4 100644 --- a/polkadot/node/core/dispute-coordinator/src/import.rs +++ b/polkadot/node/core/dispute-coordinator/src/import.rs @@ -34,9 +34,9 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::overseer; use polkadot_node_subsystem_util::runtime::RuntimeInfo; use polkadot_primitives::{ - CandidateReceipt, DisputeStatement, ExecutorParams, Hash, IndexedVec, SessionIndex, - SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, - ValidatorSignature, + CandidateHash, CandidateReceipt, DisputeStatement, ExecutorParams, Hash, IndexedVec, + SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, + ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; @@ -126,7 +126,9 @@ impl OwnVoteState { let our_valid_votes = controlled_indices .iter() .filter_map(|i| votes.valid.raw().get_key_value(i)) - .map(|(index, (kind, sig))| (*index, (DisputeStatement::Valid(*kind), sig.clone()))); + .map(|(index, (kind, sig))| { + (*index, (DisputeStatement::Valid(kind.clone()), sig.clone())) + }); let our_invalid_votes = controlled_indices .iter() .filter_map(|i| votes.invalid.get_key_value(i)) @@ -305,7 +307,7 @@ impl CandidateVoteState { DisputeStatement::Valid(valid_kind) => { let fresh = votes.valid.insert_vote( val_index, - *valid_kind, + valid_kind.clone(), statement.into_validator_signature(), ); if fresh { @@ -511,7 +513,7 @@ impl ImportResult { pub fn import_approval_votes( self, env: &CandidateEnvironment, - approval_votes: HashMap, + approval_votes: HashMap, ValidatorSignature)>, now: Timestamp, ) -> Self { let Self { @@ -525,19 +527,33 @@ impl ImportResult { let (mut votes, _) = new_state.into_old_state(); - for (index, sig) in approval_votes.into_iter() { + for (index, (candidate_hashes, sig)) in approval_votes.into_iter() { debug_assert!( { let pub_key = &env.session_info().validators.get(index).expect("indices are validated by approval-voting subsystem; qed"); - let candidate_hash = votes.candidate_receipt.hash(); let session_index = env.session_index(); - DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) - .check_signature(pub_key, candidate_hash, session_index, &sig) + candidate_hashes.contains(&votes.candidate_receipt.hash()) && DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes.clone())) + .check_signature(pub_key, *candidate_hashes.first().expect("Valid votes have at least one candidate; qed"), 
session_index, &sig) .is_ok() }, "Signature check for imported approval votes failed! This is a serious bug. Session: {:?}, candidate hash: {:?}, validator index: {:?}", env.session_index(), votes.candidate_receipt.hash(), index ); - if votes.valid.insert_vote(index, ValidDisputeStatementKind::ApprovalChecking, sig) { + if votes.valid.insert_vote( + index, + // There is a hidden dependency here between approval-voting and this subsystem. + // We should be able to start emitting + // ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates only after: + // 1. Runtime have been upgraded to know about the new format. + // 2. All nodes have been upgraded to know about the new format. + // Once those two requirements have been met we should be able to increase + // max_approval_coalesce_count to values greater than 1. + if candidate_hashes.len() > 1 { + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes) + } else { + ValidDisputeStatementKind::ApprovalChecking + }, + sig, + ) { imported_valid_votes += 1; imported_approval_votes += 1; } diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index e44530b3f1bb..d9cd4e39d3cb 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -642,7 +642,7 @@ impl Initialized { }; debug_assert!( SignedDisputeStatement::new_checked( - DisputeStatement::Valid(valid_statement_kind), + DisputeStatement::Valid(valid_statement_kind.clone()), candidate_hash, session, validator_public.clone(), @@ -656,7 +656,7 @@ impl Initialized { ); let signed_dispute_statement = SignedDisputeStatement::new_unchecked_from_trusted_source( - DisputeStatement::Valid(valid_statement_kind), + DisputeStatement::Valid(valid_statement_kind.clone()), candidate_hash, session, validator_public, diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index e96fee812409..5067d3673da9 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -576,7 +576,7 @@ pub fn make_dispute_message( .next() .ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(*statement_kind), + DisputeStatement::Valid(statement_kind.clone()), *our_vote.candidate_hash(), our_vote.session_index(), validators diff --git a/polkadot/node/core/dispute-coordinator/src/tests.rs b/polkadot/node/core/dispute-coordinator/src/tests.rs index 6912bbf67e17..da449773fe8f 100644 --- a/polkadot/node/core/dispute-coordinator/src/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/tests.rs @@ -661,7 +661,7 @@ fn make_candidate_included_event(candidate_receipt: CandidateReceipt) -> Candida pub async fn handle_approval_vote_request( ctx_handle: &mut VirtualOverseer, expected_hash: &CandidateHash, - votes_to_send: HashMap, + votes_to_send: HashMap, ValidatorSignature)>, ) { assert_matches!( ctx_handle.recv().await, @@ -868,9 +868,12 @@ fn approval_vote_import_works() { .await; gum::trace!("After sending `ImportStatements`"); - let approval_votes = [(ValidatorIndex(4), approval_vote.into_validator_signature())] - .into_iter() - .collect(); + let approval_votes = [( + ValidatorIndex(4), + (vec![candidate_receipt1.hash()], approval_vote.into_validator_signature()), + )] + .into_iter() + .collect(); handle_approval_vote_request(&mut virtual_overseer, 
&candidate_hash1, approval_votes) .await; diff --git a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index 096b73d271a8..cb55ce39bc89 100644 --- a/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/polkadot/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -221,7 +221,7 @@ where votes.valid.retain(|validator_idx, (statement_kind, _)| { is_vote_worth_to_keep( validator_idx, - DisputeStatement::Valid(*statement_kind), + DisputeStatement::Valid(statement_kind.clone()), &onchain_state, ) }); diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 8a7a3dc08b81..5eca551db0a6 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,12 +20,13 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, + vstaging::{self, ApprovalVotingParams}, + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. 
We use 128 as we'll only need that @@ -68,6 +69,7 @@ pub(crate) struct RequestResultCache { para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, node_features: LruMap, + approval_voting_params: LruMap, } impl Default for RequestResultCache { @@ -98,6 +100,7 @@ impl Default for RequestResultCache { unapplied_slashes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), key_ownership_proof: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), minimum_backing_votes: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + approval_voting_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), disabled_validators: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), @@ -507,6 +510,21 @@ impl RequestResultCache { ) { self.async_backing_params.insert(key, value); } + + pub(crate) fn approval_voting_params( + &mut self, + key: (Hash, SessionIndex), + ) -> Option<&ApprovalVotingParams> { + self.approval_voting_params.get(&key.1).map(|v| &*v) + } + + pub(crate) fn cache_approval_voting_params( + &mut self, + session_index: SessionIndex, + value: ApprovalVotingParams, + ) { + self.approval_voting_params.insert(session_index, value); + } } pub(crate) enum RequestResult { @@ -554,6 +572,7 @@ pub(crate) enum RequestResult { slashing::OpaqueKeyOwnershipProof, Option<()>, ), + ApprovalVotingParams(Hash, SessionIndex, ApprovalVotingParams), DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index 8689355c4139..4bedfd827340 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -165,6 +165,8 @@ where KeyOwnershipProof(relay_parent, validator_id, key_ownership_proof) => self .requests_cache .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), + RequestResult::ApprovalVotingParams(_relay_parent, session_index, params) => + self.requests_cache.cache_approval_voting_params(session_index, params), SubmitReportDisputeLost(_, _, _, _) => {}, DisabledValidators(relay_parent, disabled_validators) => self.requests_cache.cache_disabled_validators(relay_parent, disabled_validators), @@ -300,6 +302,9 @@ where Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) }, ), + Request::ApprovalVotingParams(session_index, sender) => + query!(approval_voting_params(session_index), sender) + .map(|sender| Request::ApprovalVotingParams(session_index, sender)), Request::DisabledValidators(sender) => query!(disabled_validators(), sender) .map(|sender| Request::DisabledValidators(sender)), Request::ParaBackingState(para, sender) => query!(para_backing_state(para), sender) @@ -571,6 +576,14 @@ where ver = Request::KEY_OWNERSHIP_PROOF_RUNTIME_REQUIREMENT, sender ), + Request::ApprovalVotingParams(session_index, sender) => { + query!( + ApprovalVotingParams, + approval_voting_params(session_index), + ver = Request::APPROVAL_VOTING_PARAMS_REQUIREMENT, + sender + ) + }, Request::SubmitReportDisputeLost(dispute_proof, key_ownership_proof, sender) => query!( SubmitReportDisputeLost, submit_report_dispute_lost(dispute_proof, key_ownership_proof), diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index b939bffb0e7f..f91723b3d39e 100644 --- 
a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,12 +20,13 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BlockNumber, - CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, - DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + Slot, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -242,6 +243,15 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + _: Hash, + _: SessionIndex, + ) -> Result { + todo!("Not required for tests") + } + async fn current_epoch(&self, _: Hash) -> Result { Ok(self.babe_epoch.as_ref().unwrap().clone()) } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 47482eef7640..d520febaef51 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -32,14 +32,15 @@ use polkadot_node_network_protocol::{ self as net_protocol, filter_by_peer_version, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, peer_set::MAX_NOTIFICATION_SIZE, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::approval::{ - v1::{ - AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, + v1::{AssignmentCertKind, BlockApprovalMeta, IndirectAssignmentCert}, + v2::{ + AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2, + IndirectSignedApprovalVoteV2, }, - v2::{AsBitIndex, AssignmentCertKindV2, CandidateBitfield, IndirectAssignmentCertV2}, }; use polkadot_node_subsystem::{ messages::{ @@ -113,6 +114,14 @@ struct ApprovalRouting { required_routing: RequiredRouting, local: bool, random_routing: RandomRouting, + peers_randomly_routed: Vec, +} + +impl ApprovalRouting { + fn mark_randomly_sent(&mut self, peer: PeerId) { + self.random_routing.inc_sent(); + self.peers_randomly_routed.push(peer); + } } // This struct is responsible for tracking the full state of an assignment and grid routing @@ -121,9 +130,9 @@ struct ApprovalEntry { // The assignment certificate. 
assignment: IndirectAssignmentCertV2, // The candidates claimed by the certificate. A mapping between bit index and candidate index. - candidates: CandidateBitfield, + assignment_claimed_candidates: CandidateBitfield, // The approval signatures for each `CandidateIndex` claimed by the assignment certificate. - approvals: HashMap, + approvals: HashMap, // The validator index of the assignment signer. validator_index: ValidatorIndex, // Information required for gossiping to other peers using the grid topology. @@ -136,6 +145,8 @@ enum ApprovalEntryError { CandidateIndexOutOfBounds, InvalidCandidateIndex, DuplicateApproval, + UnknownAssignment, + AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } impl ApprovalEntry { @@ -148,7 +159,7 @@ impl ApprovalEntry { validator_index: assignment.validator, assignment, approvals: HashMap::with_capacity(candidates.len()), - candidates, + assignment_claimed_candidates: candidates, routing_info, } } @@ -156,23 +167,15 @@ impl ApprovalEntry { // Create a `MessageSubject` to reference the assignment. pub fn create_assignment_knowledge(&self, block_hash: Hash) -> (MessageSubject, MessageKind) { ( - MessageSubject(block_hash, self.candidates.clone(), self.validator_index), + MessageSubject( + block_hash, + self.assignment_claimed_candidates.clone(), + self.validator_index, + ), MessageKind::Assignment, ) } - // Create a `MessageSubject` to reference the approval. - pub fn create_approval_knowledge( - &self, - block_hash: Hash, - candidate_index: CandidateIndex, - ) -> (MessageSubject, MessageKind) { - ( - MessageSubject(block_hash, candidate_index.into(), self.validator_index), - MessageKind::Approval, - ) - } - // Updates routing information and returns the previous information if any. pub fn routing_info_mut(&mut self) -> &mut ApprovalRouting { &mut self.routing_info @@ -188,11 +191,21 @@ impl ApprovalEntry { self.routing_info.required_routing = required_routing; } + // Tells if this entry assignment covers at least one candidate in the approval + pub fn includes_approval_candidates(&self, approval: &IndirectSignedApprovalVoteV2) -> bool { + for candidate_index in approval.candidate_indices.iter_ones() { + if self.assignment_claimed_candidates.bit_at((candidate_index).as_bit_index()) { + return true + } + } + return false + } + // Records a new approval. Returns error if the claimed candidate is not found or we already // have received the approval. pub fn note_approval( &mut self, - approval: IndirectSignedApprovalVote, + approval: IndirectSignedApprovalVoteV2, ) -> Result<(), ApprovalEntryError> { // First do some sanity checks: // - check validator index matches @@ -202,37 +215,29 @@ impl ApprovalEntry { return Err(ApprovalEntryError::InvalidValidatorIndex) } - if self.candidates.len() <= approval.candidate_index as usize { - return Err(ApprovalEntryError::CandidateIndexOutOfBounds) - } - - if !self.candidates.bit_at(approval.candidate_index.as_bit_index()) { + // We need at least one of the candidates in the approval to be in this assignment + if !self.includes_approval_candidates(&approval) { return Err(ApprovalEntryError::InvalidCandidateIndex) } - if self.approvals.contains_key(&approval.candidate_index) { + if self.approvals.contains_key(&approval.candidate_indices) { return Err(ApprovalEntryError::DuplicateApproval) } - self.approvals.insert(approval.candidate_index, approval); + self.approvals.insert(approval.candidate_indices.clone(), approval.clone()); Ok(()) } // Get the assignment certiticate and claimed candidates. 
pub fn assignment(&self) -> (IndirectAssignmentCertV2, CandidateBitfield) { - (self.assignment.clone(), self.candidates.clone()) + (self.assignment.clone(), self.assignment_claimed_candidates.clone()) } // Get all approvals for all candidates claimed by the assignment. - pub fn approvals(&self) -> Vec { + pub fn approvals(&self) -> Vec { self.approvals.values().cloned().collect::>() } - // Get the approval for a specific candidate index. - pub fn approval(&self, candidate_index: CandidateIndex) -> Option { - self.approvals.get(&candidate_index).cloned() - } - // Get validator index. pub fn validator_index(&self) -> ValidatorIndex { self.validator_index @@ -430,6 +435,41 @@ impl PeerKnowledge { fn contains(&self, message: &MessageSubject, kind: MessageKind) -> bool { self.sent.contains(message, kind) || self.received.contains(message, kind) } + + // Generate the knowledge keys for querying if all assignments of an approval are known + // by this peer. + fn generate_assignments_keys( + approval: &IndirectSignedApprovalVoteV2, + ) -> Vec<(MessageSubject, MessageKind)> { + approval + .candidate_indices + .iter_ones() + .map(|candidate_index| { + ( + MessageSubject( + approval.block_hash, + (candidate_index as CandidateIndex).into(), + approval.validator, + ), + MessageKind::Assignment, + ) + }) + .collect_vec() + } + + // Generate the knowledge keys for querying if an approval is known by peer. + fn generate_approval_key( + approval: &IndirectSignedApprovalVoteV2, + ) -> (MessageSubject, MessageKind) { + ( + MessageSubject( + approval.block_hash, + approval.candidate_indices.clone(), + approval.validator, + ), + MessageKind::Approval, + ) + } } /// Information about blocks in our current view as well as whether peers know of them. @@ -462,13 +502,13 @@ impl BlockEntry { // First map one entry per candidate to the same key we will use in `approval_entries`. // Key is (Validator_index, CandidateBitfield) that links the `ApprovalEntry` to the (K,V) // entry in `candidate_entry.messages`. - for claimed_candidate_index in entry.candidates.iter_ones() { + for claimed_candidate_index in entry.assignment_claimed_candidates.iter_ones() { match self.candidates.get_mut(claimed_candidate_index) { Some(candidate_entry) => { candidate_entry - .messages + .assignments .entry(entry.validator_index()) - .or_insert(entry.candidates.clone()); + .or_insert(entry.assignment_claimed_candidates.clone()); }, None => { // This should never happen, but if it happens, it means the subsystem is @@ -484,50 +524,107 @@ impl BlockEntry { } self.approval_entries - .entry((entry.validator_index, entry.candidates.clone())) + .entry((entry.validator_index, entry.assignment_claimed_candidates.clone())) .or_insert(entry) } - // Returns a mutable reference of `ApprovalEntry` for `candidate_index` from validator - // `validator_index`. - pub fn approval_entry( + // Tels if all candidate_indices are valid candidates + pub fn contains_candidates(&self, candidate_indices: &CandidateBitfield) -> bool { + candidate_indices + .iter_ones() + .all(|candidate_index| self.candidates.get(candidate_index as usize).is_some()) + } + + // Saves the given approval in all ApprovalEntries that contain an assignment for any of the + // candidates in the approval. + // + // Returns the required routing needed for this approval and the lit of random peers the + // covering assignments were sent. 
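// A rough, std-only sketch (not part of the patch) of the bookkeeping `note_approval` below
// performs: find every stored assignment of the signing validator that claims at least one
// candidate of the possibly coalesced vote, record the vote against it, and collect the peers
// those assignments were randomly routed to. Sets of `u32` indices stand in for
// `CandidateBitfield`, `ValidatorIndex` and `PeerId`; the real code additionally aggregates and
// cross-checks the required grid routing, which is omitted here.
use std::collections::{BTreeSet, HashMap, HashSet};

type Validator = u32;
type Candidate = u32;
type Peer = u64;

struct AssignmentRecord {
    peers_randomly_routed: Vec<Peer>,
    // Candidate sets of the votes already recorded against this assignment.
    votes: Vec<BTreeSet<Candidate>>,
}

fn note_vote(
    assignments: &mut HashMap<(Validator, BTreeSet<Candidate>), AssignmentRecord>,
    validator: Validator,
    vote_candidates: &BTreeSet<Candidate>,
) -> Result<HashSet<Peer>, &'static str> {
    let mut covered = false;
    let mut randomly_routed = HashSet::new();
    for ((signer, claimed), record) in assignments.iter_mut() {
        // Only assignments from the vote's signer that overlap its candidates are relevant.
        if *signer != validator || claimed.is_disjoint(vote_candidates) {
            continue
        }
        covered = true;
        record.votes.push(vote_candidates.clone());
        randomly_routed.extend(record.peers_randomly_routed.iter().copied());
    }
    if covered {
        Ok(randomly_routed)
    } else {
        Err("no stored assignment covers this vote")
    }
}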
+ pub fn note_approval( &mut self, - candidate_index: CandidateIndex, - validator_index: ValidatorIndex, - ) -> Option<&mut ApprovalEntry> { - self.candidates - .get(candidate_index as usize) - .map_or(None, |candidate_entry| candidate_entry.messages.get(&validator_index)) - .map_or(None, |candidate_indices| { - self.approval_entries.get_mut(&(validator_index, candidate_indices.clone())) + approval: IndirectSignedApprovalVoteV2, + ) -> Result<(RequiredRouting, HashSet), ApprovalEntryError> { + let mut required_routing = None; + let mut peers_randomly_routed_to = HashSet::new(); + + if self.candidates.len() < approval.candidate_indices.len() as usize { + return Err(ApprovalEntryError::CandidateIndexOutOfBounds) + } + + // First determine all assignments bitfields that might be covered by this approval + let covered_assignments_bitfields: HashSet = approval + .candidate_indices + .iter_ones() + .filter_map(|candidate_index| { + self.candidates.get_mut(candidate_index).map_or(None, |candidate_entry| { + candidate_entry.assignments.get(&approval.validator).cloned() + }) }) - } + .collect(); - // Get all approval entries for a given candidate. - pub fn approval_entries(&self, candidate_index: CandidateIndex) -> Vec<&ApprovalEntry> { - // Get the keys for fetching `ApprovalEntry` from `self.approval_entries`, - let approval_entry_keys = self - .candidates - .get(candidate_index as usize) - .map(|candidate_entry| &candidate_entry.messages); - - if let Some(approval_entry_keys) = approval_entry_keys { - // Ensure no duplicates. - let approval_entry_keys = approval_entry_keys.iter().unique().collect::>(); - - let mut entries = Vec::new(); - for (validator_index, candidate_indices) in approval_entry_keys { - if let Some(entry) = - self.approval_entries.get(&(*validator_index, candidate_indices.clone())) - { - entries.push(entry); + // Mark the vote in all approval entries + for assignment_bitfield in covered_assignments_bitfields { + if let Some(approval_entry) = + self.approval_entries.get_mut(&(approval.validator, assignment_bitfield)) + { + approval_entry.note_approval(approval.clone())?; + peers_randomly_routed_to + .extend(approval_entry.routing_info().peers_randomly_routed.iter()); + + if let Some(required_routing) = required_routing { + if required_routing != approval_entry.routing_info().required_routing { + // This shouldn't happen since the required routing is computed based on the + // validator_index, so two assignments from the same validators will have + // the same required routing. 
+ return Err(ApprovalEntryError::AssignmentsFollowedDifferentPaths( + required_routing, + approval_entry.routing_info().required_routing, + )) + } + } else { + required_routing = Some(approval_entry.routing_info().required_routing) } } - entries + } + + if let Some(required_routing) = required_routing { + Ok((required_routing, peers_randomly_routed_to)) } else { - vec![] + Err(ApprovalEntryError::UnknownAssignment) } } + + /// Returns the list of approval votes covering this candidate + pub fn approval_votes( + &self, + candidate_index: CandidateIndex, + ) -> Vec { + let result: Option< + HashMap<(ValidatorIndex, CandidateBitfield), IndirectSignedApprovalVoteV2>, + > = self.candidates.get(candidate_index as usize).map(|candidate_entry| { + candidate_entry + .assignments + .iter() + .filter_map(|(validator, assignment_bitfield)| { + self.approval_entries.get(&(*validator, assignment_bitfield.clone())) + }) + .flat_map(|approval_entry| { + approval_entry + .approvals + .clone() + .into_iter() + .filter(|(approved_candidates, _)| { + approved_candidates.bit_at(candidate_index.as_bit_index()) + }) + .map(|(approved_candidates, vote)| { + ((approval_entry.validator_index, approved_candidates), vote) + }) + }) + .collect() + }); + + result.map(|result| result.into_values().collect_vec()).unwrap_or_default() + } } // Information about candidates in the context of a particular block they are included in. @@ -537,7 +634,7 @@ impl BlockEntry { struct CandidateEntry { // The value represents part of the lookup key in `approval_entries` to fetch the assignment // and existing votes. - messages: HashMap, + assignments: HashMap, } #[derive(Debug, Clone, PartialEq)] @@ -557,7 +654,7 @@ impl MessageSource { enum PendingMessage { Assignment(IndirectAssignmentCertV2, CandidateBitfield), - Approval(IndirectSignedApprovalVote), + Approval(IndirectSignedApprovalVoteV2), } #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] @@ -830,6 +927,49 @@ impl State { } } + // Entry point for processing an approval coming from a peer. 
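// The handler below either buffers a peer's approval until its block is fully known
// (`pending_known`) or imports and relays it right away. A compact, std-only sketch of that
// buffer-or-process pattern; `Hash`, `Vote` and the processing closure are placeholders for the
// real block hash, `IndirectSignedApprovalVoteV2` and the import path.
use std::collections::HashMap;

type Hash = [u8; 32];

#[derive(Clone, Debug)]
struct Vote {
    block_hash: Hash,
    payload: u64,
}

fn buffer_or_process(
    pending_known: &mut HashMap<Hash, Vec<Vote>>,
    votes: Vec<Vote>,
    mut process: impl FnMut(Vote),
) {
    for vote in votes {
        // Blocks we have heard about but not yet imported keep a pending queue;
        // everything else is handled immediately.
        if let Some(pending) = pending_known.get_mut(&vote.block_hash) {
            pending.push(vote);
            continue
        }
        process(vote);
    }
}

// Once a block finishes importing, its queue is drained through the same processing path.
fn drain_pending(
    pending_known: &mut HashMap<Hash, Vec<Vote>>,
    block_hash: &Hash,
    mut process: impl FnMut(Vote),
) {
    for vote in pending_known.remove(block_hash).unwrap_or_default() {
        process(vote);
    }
}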
+ async fn process_incoming_approvals( + &mut self, + ctx: &mut Context, + metrics: &Metrics, + peer_id: PeerId, + approvals: Vec, + ) { + gum::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + num = approvals.len(), + "Processing approvals from a peer", + ); + for approval_vote in approvals.into_iter() { + if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { + let block_hash = approval_vote.block_hash; + let validator_index = approval_vote.validator; + + gum::trace!( + target: LOG_TARGET, + %peer_id, + ?block_hash, + ?validator_index, + "Pending assignment candidates {:?}", + approval_vote.candidate_indices, + ); + + pending.push((peer_id, PendingMessage::Approval(approval_vote))); + + continue + } + + self.import_and_circulate_approval( + ctx, + metrics, + MessageSource::Peer(peer_id), + approval_vote, + ) + .await; + } + } + async fn process_incoming_peer_message( &mut self, ctx: &mut Context, @@ -838,16 +978,14 @@ impl State { msg: Versioned< protocol_v1::ApprovalDistributionMessage, protocol_v2::ApprovalDistributionMessage, - protocol_vstaging::ApprovalDistributionMessage, + protocol_v3::ApprovalDistributionMessage, >, rng: &mut R, ) where R: CryptoRng + Rng, { match msg { - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( - assignments, - )) => { + Versioned::V3(protocol_v3::ApprovalDistributionMessage::Assignments(assignments)) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -887,45 +1025,18 @@ impl State { ) .await; }, - Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( - approvals, - )) | + Versioned::V3(protocol_v3::ApprovalDistributionMessage::Approvals(approvals)) => { + self.process_incoming_approvals(ctx, metrics, peer_id, approvals).await; + }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { - gum::trace!( - target: LOG_TARGET, - peer_id = %peer_id, - num = approvals.len(), - "Processing approvals from a peer", - ); - for approval_vote in approvals.into_iter() { - if let Some(pending) = self.pending_known.get_mut(&approval_vote.block_hash) { - let block_hash = approval_vote.block_hash; - let candidate_index = approval_vote.candidate_index; - let validator_index = approval_vote.validator; - - gum::trace!( - target: LOG_TARGET, - %peer_id, - ?block_hash, - ?candidate_index, - ?validator_index, - "Pending assignment", - ); - - pending.push((peer_id, PendingMessage::Approval(approval_vote))); - - continue - } - - self.import_and_circulate_approval( - ctx, - metrics, - MessageSource::Peer(peer_id), - approval_vote, - ) - .await; - } + self.process_incoming_approvals( + ctx, + metrics, + peer_id, + approvals.into_iter().map(|approval| approval.into()).collect::>(), + ) + .await; }, } } @@ -1071,8 +1182,11 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + gum::debug!(target: LOG_TARGET, "Received assignment for invalid block"); + metrics.on_assignment_recent_outdated(); } } + metrics.on_assignment_invalid_block(); return }, }; @@ -1105,6 +1219,7 @@ impl State { COST_DUPLICATE_MESSAGE, ) .await; + metrics.on_assignment_duplicate(); } else { gum::trace!( target: LOG_TARGET, @@ -1132,6 +1247,7 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + metrics.on_assignment_out_of_view(); }, } @@ -1148,6 +1264,7 @@ impl State { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment"); peer_knowledge.received.insert(message_subject, message_kind); } + 
metrics.on_assignment_good_known(); return } @@ -1204,6 +1321,8 @@ impl State { ?peer_id, "Got an `AcceptedDuplicate` assignment", ); + metrics.on_assignment_duplicatevoting(); + return }, AssignmentCheckResult::TooFarInFuture => { @@ -1220,6 +1339,8 @@ impl State { COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE, ) .await; + metrics.on_assignment_far(); + return }, AssignmentCheckResult::Bad(error) => { @@ -1237,6 +1358,7 @@ impl State { COST_INVALID_MESSAGE, ) .await; + metrics.on_assignment_bad(); return }, } @@ -1275,7 +1397,12 @@ impl State { let approval_entry = entry.insert_approval_entry(ApprovalEntry::new( assignment.clone(), claimed_candidate_indices.clone(), - ApprovalRouting { required_routing, local, random_routing: Default::default() }, + ApprovalRouting { + required_routing, + local, + random_routing: Default::default(), + peers_randomly_routed: Default::default(), + }, )); // Dispatch the message to all peers in the routing set which @@ -1305,6 +1432,10 @@ impl State { continue } + if !topology.map(|topology| topology.is_validator(&peer)).unwrap_or(false) { + continue + } + // Note: at this point, we haven't received the message from any peers // other than the source peer, and we just got it, so we haven't sent it // to any peers either. @@ -1312,7 +1443,7 @@ impl State { approval_entry.routing_info().random_routing.sample(n_peers_total, rng); if route_random { - approval_entry.routing_info_mut().random_routing.inc_sent(); + approval_entry.routing_info_mut().mark_randomly_sent(peer); peers.push(peer); } } @@ -1346,12 +1477,94 @@ impl State { } } + // Checks if an approval can be processed. + // Returns true if we can continue with processing the approval and false otherwise. + async fn check_approval_can_be_processed( + ctx: &mut Context, + assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, + approval_knowledge_key: &(MessageSubject, MessageKind), + entry: &mut BlockEntry, + reputation: &mut ReputationAggregator, + peer_id: PeerId, + metrics: &Metrics, + ) -> bool { + for message_subject in assignments_knowledge_key { + if !entry.knowledge.contains(&message_subject.0, message_subject.1) { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?message_subject, + "Unknown approval assignment", + ); + modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + metrics.on_approval_unknown_assignment(); + return false + } + } + + // check if our knowledge of the peer already contains this approval + match entry.known_by.entry(peer_id) { + hash_map::Entry::Occupied(mut knowledge) => { + let peer_knowledge = knowledge.get_mut(); + if peer_knowledge.contains(&approval_knowledge_key.0, approval_knowledge_key.1) { + if !peer_knowledge + .received + .insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1) + { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Duplicate approval", + ); + + modify_reputation( + reputation, + ctx.sender(), + peer_id, + COST_DUPLICATE_MESSAGE, + ) + .await; + metrics.on_approval_duplicate(); + } + return false + } + }, + hash_map::Entry::Vacant(_) => { + gum::debug!( + target: LOG_TARGET, + ?peer_id, + ?approval_knowledge_key, + "Approval from a peer is out of view", + ); + modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + metrics.on_approval_out_of_view(); + }, + } + + if entry.knowledge.contains(&approval_knowledge_key.0, approval_knowledge_key.1) { + if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { + peer_knowledge + .received + 
.insert(approval_knowledge_key.0.clone(), approval_knowledge_key.1); + } + + // We already processed this approval no need to continue. + gum::trace!(target: LOG_TARGET, ?peer_id, ?approval_knowledge_key, "Known approval"); + metrics.on_approval_good_known(); + modify_reputation(reputation, ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; + false + } else { + true + } + } + async fn import_and_circulate_approval( &mut self, ctx: &mut Context, metrics: &Metrics, source: MessageSource, - vote: IndirectSignedApprovalVote, + vote: IndirectSignedApprovalVoteV2, ) { let _span = self .spans @@ -1370,10 +1583,9 @@ impl State { let block_hash = vote.block_hash; let validator_index = vote.validator; - let candidate_index = vote.candidate_index; - + let candidate_indices = &vote.candidate_indices; let entry = match self.blocks.get_mut(&block_hash) { - Some(entry) if entry.candidates.get(candidate_index as usize).is_some() => entry, + Some(entry) if entry.contains_candidates(&vote.candidate_indices) => entry, _ => { if let Some(peer_id) = source.peer_id() { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { @@ -1382,7 +1594,7 @@ impl State { ?peer_id, ?block_hash, ?validator_index, - ?candidate_index, + ?candidate_indices, "Approval from a peer is out of view", ); modify_reputation( @@ -1392,6 +1604,9 @@ impl State { COST_UNEXPECTED_MESSAGE, ) .await; + metrics.on_approval_invalid_block(); + } else { + metrics.on_approval_recent_outdated(); } } return @@ -1399,81 +1614,21 @@ impl State { }; // compute metadata on the assignment. - let message_subject = MessageSubject(block_hash, candidate_index.into(), validator_index); - let message_kind = MessageKind::Approval; + let assignments_knowledge_keys = PeerKnowledge::generate_assignments_keys(&vote); + let approval_knwowledge_key = PeerKnowledge::generate_approval_key(&vote); if let Some(peer_id) = source.peer_id() { - if !entry.knowledge.contains(&message_subject, MessageKind::Assignment) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Unknown approval assignment", - ); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_UNEXPECTED_MESSAGE, - ) - .await; - return - } - - // check if our knowledge of the peer already contains this approval - match entry.known_by.entry(peer_id) { - hash_map::Entry::Occupied(mut knowledge) => { - let peer_knowledge = knowledge.get_mut(); - if peer_knowledge.contains(&message_subject, message_kind) { - if !peer_knowledge.received.insert(message_subject.clone(), message_kind) { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Duplicate approval", - ); - - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_DUPLICATE_MESSAGE, - ) - .await; - } - return - } - }, - hash_map::Entry::Vacant(_) => { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Approval from a peer is out of view", - ); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - COST_UNEXPECTED_MESSAGE, - ) - .await; - }, - } - - // if the approval is known to be valid, reward the peer - if entry.knowledge.contains(&message_subject, message_kind) { - gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval"); - modify_reputation( - &mut self.reputation, - ctx.sender(), - peer_id, - BENEFIT_VALID_MESSAGE, - ) - .await; - if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { - peer_knowledge.received.insert(message_subject.clone(), message_kind); - } + if 
!Self::check_approval_can_be_processed( + ctx, + &assignments_knowledge_keys, + &approval_knwowledge_key, + entry, + &mut self.reputation, + peer_id, + metrics, + ) + .await + { return } @@ -1495,8 +1650,8 @@ impl State { gum::trace!( target: LOG_TARGET, ?peer_id, - ?message_subject, ?result, + ?vote, "Checked approval", ); match result { @@ -1509,9 +1664,13 @@ impl State { ) .await; - entry.knowledge.insert(message_subject.clone(), message_kind); + entry + .knowledge + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { - peer_knowledge.received.insert(message_subject.clone(), message_kind); + peer_knowledge + .received + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); } }, ApprovalCheckResult::Bad(error) => { @@ -1528,74 +1687,55 @@ impl State { %error, "Got a bad approval from peer", ); + metrics.on_approval_bad(); return }, } } else { - if !entry.knowledge.insert(message_subject.clone(), message_kind) { - // if we already imported an approval, there is no need to distribute it again + if !entry + .knowledge + .insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1) + { + // if we already imported all approvals, there is no need to distribute it again gum::warn!( target: LOG_TARGET, - ?message_subject, "Importing locally an already known approval", ); return } else { gum::debug!( target: LOG_TARGET, - ?message_subject, "Importing locally a new approval", ); } } - let required_routing = match entry.approval_entry(candidate_index, validator_index) { - Some(approval_entry) => { - // Invariant: to our knowledge, none of the peers except for the `source` know about - // the approval. - metrics.on_approval_imported(); - - if let Err(err) = approval_entry.note_approval(vote.clone()) { - // this would indicate a bug in approval-voting: - // - validator index mismatch - // - candidate index mismatch - // - duplicate approval - gum::warn!( - target: LOG_TARGET, - hash = ?block_hash, - ?candidate_index, - ?validator_index, - ?err, - "Possible bug: Vote import failed", - ); - - return - } - - approval_entry.routing_info().required_routing - }, - None => { - let peer_id = source.peer_id(); - // This indicates a bug in approval-distribution, since we check the knowledge at - // the begining of the function. + let (required_routing, peers_randomly_routed_to) = match entry.note_approval(vote.clone()) { + Ok(required_routing) => required_routing, + Err(err) => { gum::warn!( target: LOG_TARGET, - ?peer_id, - ?message_subject, - "Unknown approval assignment", + hash = ?block_hash, + validator_index = ?vote.validator, + candidate_bitfield = ?vote.candidate_indices, + ?err, + "Possible bug: Vote import failed", ); - // No rep change as this is caused by an issue + metrics.on_approval_bug(); return }, }; + // Invariant: to our knowledge, none of the peers except for the `source` know about the + // approval. + metrics.on_approval_imported(); + // Dispatch a ApprovalDistributionV1Message::Approval(vote) // to all peers required by the topology, with the exception of the source peer. let topology = self.topologies.get_topology(entry.session); let source_peer = source.peer_id(); - let message_subject = &message_subject; - let peer_filter = move |peer, knowledge: &PeerKnowledge| { + let peer_filter = move |peer| { if Some(peer) == source_peer.as_ref() { return false } @@ -1611,13 +1751,13 @@ impl State { // 3. Any randomly selected peers have been sent the assignment already. 
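// The updated peer filter below keeps a peer if the grid topology requires routing to it, or if
// one of the covering assignments was already randomly routed to that peer (the source peer is
// excluded earlier in the filter). A stand-alone sketch of that rule with simplified types;
// `Peer` and the topology predicate are placeholders.
use std::collections::HashSet;

type Peer = u64;

fn should_relay(
    peer: Peer,
    source: Option<Peer>,
    required_by_grid: impl Fn(Peer) -> bool,
    peers_randomly_routed_to: &HashSet<Peer>,
) -> bool {
    if Some(peer) == source {
        return false
    }
    required_by_grid(peer) || peers_randomly_routed_to.contains(&peer)
}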
let in_topology = topology .map_or(false, |t| t.local_grid_neighbors().route_to_peer(required_routing, peer)); - in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) + in_topology || peers_randomly_routed_to.contains(peer) }; let peers = entry .known_by .iter() - .filter(|(p, k)| peer_filter(p, k)) + .filter(|(p, _)| peer_filter(p)) .filter_map(|(p, _)| self.peer_views.get(p).map(|entry| (*p, entry.version))) .collect::>(); @@ -1625,7 +1765,7 @@ impl State { for peer in peers.iter() { // we already filtered peers above, so this should always be Some if let Some(entry) = entry.known_by.get_mut(&peer.0) { - entry.sent.insert(message_subject.clone(), message_kind); + entry.sent.insert(approval_knwowledge_key.0.clone(), approval_knwowledge_key.1); } } @@ -1634,7 +1774,6 @@ impl State { gum::trace!( target: LOG_TARGET, ?block_hash, - ?candidate_index, local = source.peer_id().is_none(), num_peers = peers.len(), "Sending an approval to peers", @@ -1647,7 +1786,7 @@ impl State { fn get_approval_signatures( &mut self, indices: HashSet<(Hash, CandidateIndex)>, - ) -> HashMap { + ) -> HashMap, ValidatorSignature)> { let mut all_sigs = HashMap::new(); for (hash, index) in indices { let _span = self @@ -1670,11 +1809,20 @@ impl State { Some(e) => e, }; - let sigs = block_entry - .approval_entries(index) - .into_iter() - .filter_map(|approval_entry| approval_entry.approval(index)) - .map(|approval| (approval.validator, approval.signature)); + let sigs = block_entry.approval_votes(index).into_iter().map(|approval| { + ( + approval.validator, + ( + hash, + approval + .candidate_indices + .iter_ones() + .map(|val| val as CandidateIndex) + .collect_vec(), + approval.signature, + ), + ) + }); all_sigs.extend(sigs); } all_sigs @@ -1718,23 +1866,31 @@ impl State { let peer_knowledge = entry.known_by.entry(peer_id).or_default(); let topology = topologies.get_topology(entry.session); - // We want to iterate the `approval_entries` of the block entry as these contain all - // assignments that also link all approval votes. + // We want to iterate the `approval_entries` of the block entry as these contain + // all assignments that also link all approval votes. for approval_entry in entry.approval_entries.values_mut() { // Propagate the message to all peers in the required routing set OR // randomly sample peers. { let required_routing = approval_entry.routing_info().required_routing; - let random_routing = &mut approval_entry.routing_info_mut().random_routing; + let routing_info = &mut approval_entry.routing_info_mut(); let rng = &mut *rng; let mut peer_filter = move |peer_id| { let in_topology = topology.as_ref().map_or(false, |t| { t.local_grid_neighbors().route_to_peer(required_routing, peer_id) }); in_topology || { - let route_random = random_routing.sample(total_peers, rng); + if !topology + .map(|topology| topology.is_validator(peer_id)) + .unwrap_or(false) + { + return false + } + + let route_random = + routing_info.random_routing.sample(total_peers, rng); if route_random { - random_routing.inc_sent(); + routing_info.mark_randomly_sent(*peer_id); } route_random @@ -1751,7 +1907,8 @@ impl State { let (assignment_knowledge, message_kind) = approval_entry.create_assignment_knowledge(block); - // Only send stuff a peer doesn't know in the context of a relay chain block. + // Only send stuff a peer doesn't know in the context of a relay chain + // block. 
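// The knowledge checks below rely on per-peer `sent` and `received` sets keyed by a message
// subject plus its kind (assignment or approval), so nothing is forwarded twice to the same
// peer. A small self-contained sketch of that bookkeeping; the `String` subject stands in for
// the real `MessageSubject`.
use std::collections::HashSet;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
enum Kind {
    Assignment,
    Approval,
}

#[derive(Default)]
struct PeerKnowledgeSketch {
    sent: HashSet<(String, Kind)>,
    received: HashSet<(String, Kind)>,
}

impl PeerKnowledgeSketch {
    // A message is "known" to the peer if we either sent it to them or received it from them.
    fn contains(&self, subject: &str, kind: Kind) -> bool {
        let key = (subject.to_owned(), kind);
        self.sent.contains(&key) || self.received.contains(&key)
    }

    // Returns true the first time we record sending this message to the peer.
    fn note_sent(&mut self, subject: &str, kind: Kind) -> bool {
        self.sent.insert((subject.to_owned(), kind))
    }
}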
if !peer_knowledge.contains(&assignment_knowledge, message_kind) { peer_knowledge.sent.insert(assignment_knowledge, message_kind); assignments_to_send.push(assignment_message); @@ -1759,12 +1916,12 @@ impl State { // Filter approval votes. for approval_message in approval_messages { - let (approval_knowledge, message_kind) = approval_entry - .create_approval_knowledge(block, approval_message.candidate_index); + let approval_knowledge = + PeerKnowledge::generate_approval_key(&approval_message); - if !peer_knowledge.contains(&approval_knowledge, message_kind) { - peer_knowledge.sent.insert(approval_knowledge, message_kind); + if !peer_knowledge.contains(&approval_knowledge.0, approval_knowledge.1) { approvals_to_send.push(approval_message); + peer_knowledge.sent.insert(approval_knowledge.0, approval_knowledge.1); } } } @@ -1937,6 +2094,7 @@ impl State { // Punish the peer for the invalid message. modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) .await; + gum::error!(target: LOG_TARGET, block_hash = ?cert.block_hash, ?candidate_index, validator_index = ?cert.validator, kind = ?cert.cert.kind, "Bad assignment v1"); } else { sanitized_assignments.push((cert.into(), candidate_index.into())) } @@ -1979,6 +2137,9 @@ impl State { // Punish the peer for the invalid message. modify_reputation(&mut self.reputation, sender, peer_id, COST_OVERSIZED_BITFIELD) .await; + for candidate_index in candidate_bitfield.iter_ones() { + gum::error!(target: LOG_TARGET, block_hash = ?cert.block_hash, ?candidate_index, validator_index = ?cert.validator, "Bad assignment v2"); + } } else { sanitized_assignments.push((cert, candidate_bitfield)) } @@ -2066,11 +2227,10 @@ async fn adjust_required_routing_and_propagate { gum::debug!( target: LOG_TARGET, - "Distributing our approval vote on candidate (block={}, index={})", + "Distributing our approval vote on candidate (block={}, index={:?})", vote.block_hash, - vote.candidate_index, + vote.candidate_indices, ); state @@ -2296,7 +2456,7 @@ pub const MAX_ASSIGNMENT_BATCH_SIZE: usize = ensure_size_not_zero( /// The maximum amount of approvals per batch is 33% of maximum allowed by protocol. pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( - MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::() / 3, + MAX_NOTIFICATION_SIZE as usize / std::mem::size_of::() / 3, ); // Low level helper for sending assignments. @@ -2306,12 +2466,12 @@ async fn send_assignments_batched_inner( peers: Vec, peer_version: ValidationVersion, ) { - if peer_version == ValidationVersion::VStaging { + if peer_version == ValidationVersion::V3 { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments( + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments( batch.into_iter().collect(), ), )), @@ -2362,7 +2522,7 @@ pub(crate) async fn send_assignments_batched( ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); - let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + let v3_peers = filter_by_peer_version(peers, ValidationVersion::V3.into()); // V1 and V2 validation protocol do not have any changes with regard to // ApprovalDistributionMessage so they can be treated the same. 
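// The batching hunks below split the peer set by validation-protocol version: V3 peers receive
// coalesced approvals as-is, while V1/V2 peers can only be sent votes that cover exactly one
// candidate, so wider votes are filtered out for them. A hedged sketch of that partitioning;
// `Vote` with a plain list of candidate indices stands in for `IndirectSignedApprovalVoteV2`,
// and the batch size here is illustrative rather than the protocol-derived constant.
#[derive(Clone, Debug)]
struct Vote {
    candidate_indices: Vec<u32>,
}

const MAX_APPROVAL_BATCH_SIZE: usize = 33;

// Approvals that legacy (single-candidate) peers can still understand.
fn legacy_compatible(approvals: &[Vote]) -> Vec<Vote> {
    approvals.iter().filter(|vote| vote.candidate_indices.len() == 1).cloned().collect()
}

// Chunk a message list into protocol-sized batches, mirroring the peekable/take loops below.
fn batches(approvals: Vec<Vote>) -> Vec<Vec<Vote>> {
    let mut out = Vec::new();
    let mut iter = approvals.into_iter().peekable();
    while iter.peek().is_some() {
        out.push(iter.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect());
    }
    out
}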
@@ -2400,18 +2560,13 @@ pub(crate) async fn send_assignments_batched( } } - if !vstaging_peers.is_empty() { - let mut vstaging = v2_assignments.into_iter().peekable(); + if !v3_peers.is_empty() { + let mut v3 = v2_assignments.into_iter().peekable(); - while vstaging.peek().is_some() { - let batch = vstaging.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); - send_assignments_batched_inner( - sender, - batch, - vstaging_peers.clone(), - ValidationVersion::VStaging, - ) - .await; + while v3.peek().is_some() { + let batch = v3.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); + send_assignments_batched_inner(sender, batch, v3_peers.clone(), ValidationVersion::V3) + .await; } } } @@ -2419,15 +2574,20 @@ pub(crate) async fn send_assignments_batched( /// Send approvals while honoring the `max_notification_size` of the protocol and peer version. pub(crate) async fn send_approvals_batched( sender: &mut impl overseer::ApprovalDistributionSenderTrait, - approvals: impl IntoIterator + Clone, + approvals: impl IntoIterator + Clone, peers: &[(PeerId, ProtocolVersion)], ) { let v1_peers = filter_by_peer_version(peers, ValidationVersion::V1.into()); let v2_peers = filter_by_peer_version(peers, ValidationVersion::V2.into()); - let vstaging_peers = filter_by_peer_version(peers, ValidationVersion::VStaging.into()); + let v3_peers = filter_by_peer_version(peers, ValidationVersion::V3.into()); if !v1_peers.is_empty() || !v2_peers.is_empty() { - let mut batches = approvals.clone().into_iter().peekable(); + let mut batches = approvals + .clone() + .into_iter() + .filter(|approval| approval.candidate_indices.count_ones() == 1) + .filter_map(|val| val.try_into().ok()) + .peekable(); while batches.peek().is_some() { let batch: Vec<_> = batches.by_ref().take(MAX_APPROVAL_BATCH_SIZE).collect(); @@ -2456,7 +2616,7 @@ pub(crate) async fn send_approvals_batched( } } - if !vstaging_peers.is_empty() { + if !v3_peers.is_empty() { let mut batches = approvals.into_iter().peekable(); while batches.peek().is_some() { @@ -2464,12 +2624,10 @@ pub(crate) async fn send_approvals_batched( sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers.clone(), - Versioned::VStaging( - protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(batch), - ), - ), + v3_peers.clone(), + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(batch), + )), )) .await; } diff --git a/polkadot/node/network/approval-distribution/src/metrics.rs b/polkadot/node/network/approval-distribution/src/metrics.rs index 6864259e6fdb..0642b1b2e0cd 100644 --- a/polkadot/node/network/approval-distribution/src/metrics.rs +++ b/polkadot/node/network/approval-distribution/src/metrics.rs @@ -31,6 +31,8 @@ struct MetricsInner { time_unify_with_peer: prometheus::Histogram, time_import_pending_now_known: prometheus::Histogram, time_awaiting_approval_voting: prometheus::Histogram, + assignments_received_result: prometheus::CounterVec, + approvals_received_result: prometheus::CounterVec, } trait AsLabel { @@ -78,6 +80,132 @@ impl Metrics { .map(|metrics| metrics.time_import_pending_now_known.start_timer()) } + pub fn on_approval_already_known(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["known"]).inc() + } + } + + pub fn on_approval_entry_not_found(&self) { + if let Some(metrics) = &self.0 { + 
metrics.approvals_received_result.with_label_values(&["noapprovalentry"]).inc() + } + } + + pub fn on_approval_recent_outdated(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["outdated"]).inc() + } + } + + pub fn on_approval_invalid_block(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["invalidblock"]).inc() + } + } + + pub fn on_approval_unknown_assignment(&self) { + if let Some(metrics) = &self.0 { + metrics + .approvals_received_result + .with_label_values(&["unknownassignment"]) + .inc() + } + } + + pub fn on_approval_duplicate(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["duplicate"]).inc() + } + } + + pub fn on_approval_out_of_view(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["outofview"]).inc() + } + } + + pub fn on_approval_good_known(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["goodknown"]).inc() + } + } + + pub fn on_approval_bad(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["bad"]).inc() + } + } + + pub fn on_approval_unexpected(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["unexpected"]).inc() + } + } + + pub fn on_approval_bug(&self) { + if let Some(metrics) = &self.0 { + metrics.approvals_received_result.with_label_values(&["bug"]).inc() + } + } + + pub fn on_assignment_already_known(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["known"]).inc() + } + } + + pub fn on_assignment_recent_outdated(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["outdated"]).inc() + } + } + + pub fn on_assignment_invalid_block(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["invalidblock"]).inc() + } + } + + pub fn on_assignment_duplicate(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["duplicate"]).inc() + } + } + + pub fn on_assignment_out_of_view(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["outofview"]).inc() + } + } + + pub fn on_assignment_good_known(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["goodknown"]).inc() + } + } + + pub fn on_assignment_bad(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["bad"]).inc() + } + } + + pub fn on_assignment_duplicatevoting(&self) { + if let Some(metrics) = &self.0 { + metrics + .assignments_received_result + .with_label_values(&["duplicatevoting"]) + .inc() + } + } + + pub fn on_assignment_far(&self) { + if let Some(metrics) = &self.0 { + metrics.assignments_received_result.with_label_values(&["far"]).inc() + } + } + pub(crate) fn time_awaiting_approval_voting( &self, ) -> Option { @@ -167,6 +295,26 @@ impl MetricsTrait for Metrics { ).buckets(vec![0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768, 4.9152, 6.5536,]))?, registry, )?, + assignments_received_result: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_assignments_received_result", + "Result of a processed assignement", + ), + &["status"] + )?, + registry, + )?, + 
approvals_received_result: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_approvals_received_result", + "Result of a processed approval", + ), + &["status"] + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 4d272d7af0e0..ad5d0bb0a9c5 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -25,8 +25,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::approval::{ v1::{ - AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, VrfPreOutput, VrfProof, - VrfSignature, + AssignmentCert, AssignmentCertKind, IndirectAssignmentCert, IndirectSignedApprovalVote, + VrfPreOutput, VrfProof, VrfSignature, }, v2::{ AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2, @@ -133,14 +133,13 @@ fn make_gossip_topology( all_peers: &[(PeerId, AuthorityDiscoveryId)], neighbors_x: &[usize], neighbors_y: &[usize], + local_index: usize, ) -> network_bridge_event::NewGossipTopology { // This builds a grid topology which is a square matrix. // The local validator occupies the top left-hand corner. // The X peers occupy the same row and the Y peers occupy // the same column. - let local_index = 1; - assert_eq!( neighbors_x.len(), neighbors_y.len(), @@ -277,16 +276,16 @@ async fn send_message_from_peer_v2( .await; } -async fn send_message_from_peer_vstaging( +async fn send_message_from_peer_v3( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, - msg: protocol_vstaging::ApprovalDistributionMessage, + msg: protocol_v3::ApprovalDistributionMessage, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( *peer_id, - Versioned::VStaging(msg), + Versioned::V3(msg), )), ) .await; @@ -380,10 +379,11 @@ fn state_with_reputation_delay() -> State { /// the new peer sends us the same assignment #[test] fn try_import_the_same_assignment() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - let peer_d = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = peers.get(4).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); @@ -394,6 +394,10 @@ fn try_import_the_same_assignment() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -446,7 +450,7 @@ fn try_import_the_same_assignment() { ); // setup new peer with V2 - setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::V3).await; // send the same assignment from peer_d let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); @@ -464,19 +468,24 @@ fn try_import_the_same_assignment() { /// cores. 
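// The metrics hunks above register two `CounterVec`s (one for assignments, one for approvals)
// whose single `status` label records why each incoming message was accepted or rejected. A
// minimal sketch of that pattern against the upstream `prometheus` crate; the subsystem itself
// goes through the substrate metrics wrappers, so the registration helper and metric names here
// are illustrative.
use prometheus::{CounterVec, Opts, Registry};

fn register_outcome_counter(registry: &Registry) -> prometheus::Result<CounterVec> {
    let counter = CounterVec::new(
        Opts::new(
            "example_approvals_received_result",
            "Result of a processed approval (illustrative name)",
        ),
        &["status"],
    )?;
    registry.register(Box::new(counter.clone()))?;
    Ok(counter)
}

// One label value per terminal outcome, mirroring the `on_approval_*` helpers above.
fn record_some_outcomes(counter: &CounterVec) {
    counter.with_label_values(&["goodknown"]).inc();
    counter.with_label_values(&["duplicate"]).inc();
    counter.with_label_values(&["bad"]).inc();
}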
#[test] fn try_import_the_same_assignment_v2() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); - let peer_d = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = peers.get(4).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::VStaging).await; - setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; - setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -503,8 +512,8 @@ fn try_import_the_same_assignment_v2() { let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfield.clone()); let assignments = vec![(cert.clone(), cores.clone().try_into().unwrap())]; - let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer_vstaging(overseer, &peer_a, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments.clone()); + send_message_from_peer_v3(overseer, &peer_a, msg).await; expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; @@ -528,8 +537,8 @@ fn try_import_the_same_assignment_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 2); @@ -538,11 +547,11 @@ fn try_import_the_same_assignment_v2() { ); // setup new peer - setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_d, view![], ValidationVersion::V3).await; // send the same assignment from peer_d - let msg = protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer_vstaging(overseer, &peer_d, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(assignments); + send_message_from_peer_v3(overseer, &peer_d, msg).await; expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; @@ -705,14 +714,19 @@ fn spam_attack_results_in_negative_reputation_change() { #[test] fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let parent_hash = Hash::repeat_byte(0xFF); - let peer_a = PeerId::random(); let hash = Hash::repeat_byte(0xAA); + let peers = make_peers_and_authority_ids(8); + 
let peer_a = peers.first().unwrap().0; + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; setup_peer_with_view(overseer, peer, view![], ValidationVersion::V1).await; + // Setup a topology where peer_a is neigboor to current node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + // new block `hash` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -780,10 +794,12 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { } #[test] -fn import_approval_happy_path() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); +fn import_approval_happy_path_v1_v2_peers() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); @@ -791,7 +807,7 @@ fn import_approval_happy_path() { let overseer = &mut virtual_overseer; // setup peers with V1 and V2 protocol versions setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V1).await; - setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V1).await; // new block `hash_a` with 1 candidates @@ -806,6 +822,9 @@ fn import_approval_happy_path() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // import an assignment related to `hash` locally let validator_index = ValidatorIndex(0); let candidate_index = 0u32; @@ -838,8 +857,8 @@ fn import_approval_happy_path() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) )) )) => { assert_eq!(peers.len(), 1); @@ -848,14 +867,15 @@ fn import_approval_happy_path() { ); // send the an approval from peer_b - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg: protocol_v3::ApprovalDistributionMessage = + protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -886,6 +906,474 @@ fn import_approval_happy_path() { }); } +// Test a v2 approval that signs multiple candidate is correctly processed. 
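// The test below builds an `IndirectSignedApprovalVoteV2` whose `candidate_indices` bitfield
// claims candidates 0 and 1 under a single signature, which is the coalescing this patch is
// about. A self-contained sketch of that vote shape; the `u64`-backed bitfield and dummy
// signature are simplified stand-ins for the real `CandidateBitfield` and `ValidatorSignature`.
#[derive(Clone, Debug, PartialEq)]
struct CoalescedVote {
    block_hash: [u8; 32],
    // Bit `i` set means candidate index `i` is covered by this vote.
    candidate_indices: u64,
    validator: u32,
    signature: [u8; 64],
}

// Build the bitfield from a list of candidate indices (assumed to be below 64 here).
fn bitfield_from_indices(indices: &[u32]) -> u64 {
    indices.iter().fold(0u64, |acc, i| acc | (1u64 << *i))
}

// Recover the claimed indices, the same way `iter_ones` is used on the real bitfield.
fn iter_ones(bitfield: u64) -> impl Iterator<Item = u32> {
    (0..64u32).filter(move |i| (bitfield & (1u64 << *i)) != 0)
}

fn example_vote() -> CoalescedVote {
    CoalescedVote {
        block_hash: [0xAA; 32],
        candidate_indices: bitfield_from_indices(&[0, 1]),
        validator: 0,
        signature: [0u8; 64],
    }
}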
+#[test] +fn import_approval_happy_path_v2() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(0); + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + let candidate_bitfields = vec![CoreIndex(0), CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment( + cert.clone().into(), + candidate_indices.clone(), + ), + ) + .await; + + // 1 peer is v2 + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 2); + assert_eq!(assignments.len(), 1); + } + ); + + // send the an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_b, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert_eq!(peers.len(), 1); + assert_eq!(approvals.len(), 1); + } + ); + virtual_overseer + }); +} + +// Tests that votes that cover multiple assignments candidates are correctly processed on importing +#[test] +fn multiple_assignments_covered_with_one_approval_vote() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let peer_d = 
peers.get(4).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_d, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(2); // peer_c is the originator + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + + let core_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfields); + + // send the candidate 0 assignment from peer_b + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (0 as CandidateIndex).into(), + )]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_a)); + assert!(peers.contains(&peer_b)); + assert_eq!(assignments.len(), 1); + } + ); + + let candidate_bitfields = vec![CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + + // send the candidate 1 assignment from peer_c + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (1 as CandidateIndex).into(), + )]); + + send_message_from_peer_v3(overseer, &peer_c, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_c, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + 
Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_b)); + assert!(peers.contains(&peer_a)); + assert_eq!(assignments.len(), 1); + } + ); + + // send an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() >= 2); + assert!(peers.contains(&peer_b)); + assert!(peers.contains(&peer_a)); + assert_eq!(approvals.len(), 1); + } + ); + for candidate_index in 0..1 { + let (tx_distribution, rx_distribution) = oneshot::channel(); + let mut candidates_requesting_signatures = HashSet::new(); + candidates_requesting_signatures.insert((hash, candidate_index)); + overseer_send( + overseer, + ApprovalDistributionMessage::GetApprovalSignatures( + candidates_requesting_signatures, + tx_distribution, + ), + ) + .await; + let signatures = rx_distribution.await.unwrap(); + + assert_eq!(signatures.len(), 1); + for (signing_validator, signature) in signatures { + assert_eq!(validator_index, signing_validator); + assert_eq!(signature.0, hash); + assert_eq!(signature.2, approval.signature); + assert_eq!(signature.1, vec![0, 1]); + } + } + virtual_overseer + }); +} + +// Tests that votes that cover multiple assignments candidates are correctly processed when unify +// with peer view +#[test] +fn unify_with_peer_multiple_assignments_covered_with_one_approval_vote() { + let peers = make_peers_and_authority_ids(15); + + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_d = peers.get(4).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(state_without_reputation_delay(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + setup_peer_with_view(overseer, &peer_d, view![hash], ValidationVersion::V3).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 2], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // Set up a gossip topology, where a, b, and c, d are topology neighboors to the node. 
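// In this test only `peer_d` is connected while the two assignments and the coalesced
// approval are imported; `peer_a` and `peer_b` only join afterwards, so what is being
// exercised is `unify_with_peer`: on connection the node is expected to send each late
// peer both assignments together with the single approval vote covering both candidates.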
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(2); // peer_c is the originator + let candidate_indices: CandidateBitfield = + vec![0 as CandidateIndex, 1 as CandidateIndex].try_into().unwrap(); + + let core_bitfields = vec![CoreIndex(0)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, core_bitfields); + + // send the candidate 0 assignment from peer_b + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (0 as CandidateIndex).into(), + )]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + let candidate_bitfields = vec![CoreIndex(1)].try_into().unwrap(); + let cert = fake_assignment_cert_v2(hash, validator_index, candidate_bitfields); + + // send the candidate 1 assignment from peer_c + let assignment = IndirectAssignmentCertV2 { + block_hash: hash, + validator: validator_index, + cert: cert.cert, + }; + let msg = protocol_v3::ApprovalDistributionMessage::Assignments(vec![( + assignment, + (1 as CandidateIndex).into(), + )]); + + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( + _, _, + tx, + )) => { + tx.send(AssignmentCheckResult::Accepted).unwrap(); + } + ); + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + // send an approval from peer_b + let approval = IndirectSignedApprovalVoteV2 { + block_hash: hash, + candidate_indices, + validator: validator_index, + signature: dummy_signature(), + }; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_d, msg).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE_FIRST).await; + + // setup peers with V2 protocol versions + setup_peer_with_view(overseer, &peer_a, view![hash], ValidationVersion::V3).await; + setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V3).await; + let mut expected_peers_assignments = vec![peer_a, peer_b]; + let mut expected_peers_approvals = vec![peer_a, peer_b]; + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_assignments.contains(peers.first().unwrap())); + expected_peers_assignments.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(assignments.len(), 2); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + 
AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_approvals.contains(peers.first().unwrap())); + expected_peers_approvals.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(approvals.len(), 1); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_assignments.contains(peers.first().unwrap())); + expected_peers_assignments.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(assignments.len(), 2); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert!(peers.len() == 1); + assert!(expected_peers_approvals.contains(peers.first().unwrap())); + expected_peers_approvals.retain(|peer| peer != peers.first().unwrap()); + assert_eq!(approvals.len(), 1); + } + ); + + virtual_overseer + }); +} + #[test] fn import_approval_bad() { let peer_a = PeerId::random(); @@ -916,14 +1404,14 @@ fn import_approval_bad() { let cert = fake_assignment_cert(hash, validator_index); // send the an approval from peer_b, we don't have an assignment yet - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; @@ -948,8 +1436,8 @@ fn import_approval_bad() { expect_reputation_change(overseer, &peer_b, BENEFIT_VALID_MESSAGE_FIRST).await; // and try again - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, &peer_b, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -1048,7 +1536,8 @@ fn update_peer_view() { let hash_b = Hash::repeat_byte(0xBB); let hash_c = Hash::repeat_byte(0xCC); let hash_d = Hash::repeat_byte(0xDD); - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let peer = &peer_a; let state = test_harness(State::default(), |mut virtual_overseer| async move { @@ -1082,6 +1571,9 @@ fn update_peer_view() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta_a, meta_b, meta_c]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. 
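// Note the switch from a random `PeerId` to one produced by `make_peers_and_authority_ids`,
// plus an explicit topology setup: with this change set messages appear to be routed only
// to peers that are known to the session topology (see the new
// `SessionGridTopology::is_validator` helper further below), so a peer the topology does
// not know about would no longer receive anything.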
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let cert_a = fake_assignment_cert(hash_a, ValidatorIndex(0)); let cert_b = fake_assignment_cert(hash_b, ValidatorIndex(0)); @@ -1264,14 +1756,14 @@ fn import_remotely_then_locally() { assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "no message should be sent"); // send the approval remotely - let approval = IndirectSignedApprovalVote { + let approval = IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index, + candidate_indices: candidate_index.into(), validator: validator_index, signature: dummy_signature(), }; - let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, peer, msg).await; + let msg = protocol_v3::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer_v3(overseer, peer, msg).await; assert_matches!( overseer_recv(overseer).await, @@ -1295,7 +1787,8 @@ fn import_remotely_then_locally() { #[test] fn sends_assignments_even_when_state_is_approved() { - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let peer = &peer_a; @@ -1315,6 +1808,9 @@ fn sends_assignments_even_when_state_is_approved() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let validator_index = ValidatorIndex(0); let candidate_index = 0u32; @@ -1336,8 +1832,11 @@ fn sends_assignments_even_when_state_is_approved() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; // connect the peer. setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V1).await; @@ -1380,7 +1879,8 @@ fn sends_assignments_even_when_state_is_approved() { /// assignemnts. #[test] fn sends_assignments_even_when_state_is_approved_v2() { - let peer_a = PeerId::random(); + let peers = make_peers_and_authority_ids(8); + let peer_a = peers.first().unwrap().0; let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); let peer = &peer_a; @@ -1400,6 +1900,9 @@ fn sends_assignments_even_when_state_is_approved_v2() { let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); overseer_send(overseer, msg).await; + // Setup a topology where peer_a is neigboor to current node. + setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0], &[2], 1)).await; + let validator_index = ValidatorIndex(0); let cores = vec![0, 1, 2, 3]; let candidate_bitfield: CandidateBitfield = cores.clone().try_into().unwrap(); @@ -1416,9 +1919,9 @@ fn sends_assignments_even_when_state_is_approved_v2() { // Assumes candidate index == core index. let approvals = cores .iter() - .map(|core| IndirectSignedApprovalVote { + .map(|core| IndirectSignedApprovalVoteV2 { block_hash: hash, - candidate_index: *core, + candidate_indices: (*core).into(), validator: validator_index, signature: dummy_signature(), }) @@ -1442,7 +1945,7 @@ fn sends_assignments_even_when_state_is_approved_v2() { } // connect the peer. 
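// The peer below connects with `ValidationVersion::V3`, so approvals go out on the wire
// as `protocol_v3` messages carrying `IndirectSignedApprovalVoteV2`, i.e. a candidate
// bitfield instead of a single index. A minimal sketch of such a wire message, reusing
// the helpers and bindings of the surrounding test:
let _example_wire_approval = protocol_v3::ApprovalDistributionMessage::Approvals(vec![
    IndirectSignedApprovalVoteV2 {
        block_hash: hash,
        candidate_indices: (0 as CandidateIndex).into(),
        validator: validator_index,
        signature: dummy_signature(),
    },
]);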
- setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::VStaging).await; + setup_peer_with_view(overseer, peer, view![hash], ValidationVersion::V3).await; let assignments = vec![(cert.clone(), candidate_bitfield.clone())]; @@ -1450,8 +1953,8 @@ fn sends_assignments_even_when_state_is_approved_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Assignments(sent_assignments) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Assignments(sent_assignments) )) )) => { assert_eq!(peers, vec![*peer]); @@ -1463,14 +1966,14 @@ fn sends_assignments_even_when_state_is_approved_v2() { overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( peers, - Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( - protocol_vstaging::ApprovalDistributionMessage::Approvals(sent_approvals) + Versioned::V3(protocol_v3::ValidationProtocol::ApprovalDistribution( + protocol_v3::ApprovalDistributionMessage::Approvals(sent_approvals) )) )) => { // Construct a hashmaps of approvals for comparison. Approval distribution reorders messages because they are kept in a // hashmap as well. - let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); - let approvals = approvals.into_iter().map(|approval| (approval.candidate_index, approval)).collect::>(); + let sent_approvals = sent_approvals.into_iter().map(|approval| (approval.candidate_indices.clone(), approval)).collect::>(); + let approvals = approvals.into_iter().map(|approval| (approval.candidate_indices.clone(), approval)).collect::>(); assert_eq!(peers, vec![*peer]); assert_eq!(sent_approvals, approvals); @@ -1580,13 +2083,19 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology( + 1, + &peers, + &[0, 10, 20, 30, 40, 60, 70, 80], + &[50, 51, 52, 53, 54, 55, 56, 57], + 1, + ), ) .await; let expected_indices = [ // Both dimensions in the gossip topology - 0, 10, 20, 30, 50, 51, 52, 53, + 0, 10, 20, 30, 40, 60, 70, 80, 50, 51, 52, 53, 54, 55, 56, 57, ]; // new block `hash_a` with 1 candidates @@ -1623,8 +2132,11 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -1688,7 +2200,7 @@ fn propagates_assignments_along_unshared_dimension() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -1831,13 +2343,19 @@ fn propagates_to_required_after_connect() { // Set up a gossip topology. 
setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology( + 1, + &peers, + &[0, 10, 20, 30, 40, 60, 70, 80], + &[50, 51, 52, 53, 54, 55, 56, 57], + 1, + ), ) .await; let expected_indices = [ // Both dimensions in the gossip topology, minus omitted. - 20, 30, 52, 53, + 20, 30, 40, 60, 70, 80, 52, 53, 54, 55, 56, 57, ]; // new block `hash_a` with 1 candidates @@ -1874,8 +2392,11 @@ fn propagates_to_required_after_connect() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -2002,53 +2523,21 @@ fn sends_to_more_peers_after_getting_topology() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; - let mut expected_indices = vec![0, 10, 20, 30, 50, 51, 52, 53]; - let assignment_sent_peers = assert_matches!( - overseer_recv(overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(sent_assignments) - )) - )) => { - // Only sends to random peers. - assert_eq!(sent_peers.len(), 4); - for peer in &sent_peers { - let i = peers.iter().position(|p| peer == &p.0).unwrap(); - // Random gossip before topology can send to topology-targeted peers. - // Remove them from the expected indices so we don't expect - // them to get the messages again after the assignment. - expected_indices.retain(|&i2| i2 != i); - } - assert_eq!(sent_assignments, assignments); - sent_peers - } - ); - - assert_matches!( - overseer_recv(overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( - sent_peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(sent_approvals) - )) - )) => { - // Random sampling is reused from the assignment. - assert_eq!(sent_peers, assignment_sent_peers); - assert_eq!(sent_approvals, approvals); - } - ); + let expected_indices = vec![0, 10, 20, 30, 50, 51, 52, 53]; // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2151,7 +2640,7 @@ fn originator_aggression_l1() { // Set up a gossip topology. 
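// In `sends_to_more_peers_after_getting_topology` above, the assertions covering random
// gossip before a topology is known were removed: the tests no longer expect any
// assignments or approvals to be sent to randomly sampled peers, so pre-topology random
// gossip appears to have been dropped in this change set.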
setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2164,8 +2653,11 @@ fn originator_aggression_l1() { ) .await; - overseer_send(overseer, ApprovalDistributionMessage::DistributeApproval(approval.clone())) - .await; + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeApproval(approval.clone().into()), + ) + .await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -2307,7 +2799,7 @@ fn non_originator_aggression_l1() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2412,7 +2904,7 @@ fn non_originator_aggression_l2() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2558,7 +3050,7 @@ fn resends_messages_periodically() { // Set up a gossip topology. setup_gossip_topology( overseer, - make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53]), + make_gossip_topology(1, &peers, &[0, 10, 20, 30], &[50, 51, 52, 53], 1), ) .await; @@ -2681,12 +3173,13 @@ fn resends_messages_periodically() { /// Tests that peers correctly receive versioned messages. #[test] fn import_versioned_approval() { - let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - let peer_c = PeerId::random(); + let peers = make_peers_and_authority_ids(15); + let peer_a = peers.get(0).unwrap().0; + let peer_b = peers.get(1).unwrap().0; + let peer_c = peers.get(2).unwrap().0; + let parent_hash = Hash::repeat_byte(0xFF); let hash = Hash::repeat_byte(0xAA); - let state = state_without_reputation_delay(); let _ = test_harness(state, |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; @@ -2695,6 +3188,10 @@ fn import_versioned_approval() { setup_peer_with_view(overseer, &peer_b, view![hash], ValidationVersion::V1).await; setup_peer_with_view(overseer, &peer_c, view![hash], ValidationVersion::V2).await; + // Set up a gossip topology, where a, b, c and d are topology neighboors to the node under + // testing. 
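// Approvals received from V1/V2 peers still carry a single `candidate_index`, but
// approval-voting now works on the coalesced vote type, hence the
// `assert_eq!(vote, approval.into())` check further down. A minimal sketch of that
// conversion, assuming the same `From` impl the subsystem relies on:
let _converted: IndirectSignedApprovalVoteV2 = IndirectSignedApprovalVote {
    block_hash: hash,
    candidate_index: 0,
    validator: ValidatorIndex(0),
    signature: dummy_signature(),
}
.into();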
+ setup_gossip_topology(overseer, make_gossip_topology(1, &peers, &[0, 1], &[2, 4], 3)).await; + // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { hash, @@ -2762,7 +3259,7 @@ fn import_versioned_approval() { vote, tx, )) => { - assert_eq!(vote, approval); + assert_eq!(vote, approval.into()); tx.send(ApprovalCheckResult::Accepted).unwrap(); } ); @@ -2782,6 +3279,7 @@ fn import_versioned_approval() { assert_eq!(approvals.len(), 1); } ); + assert_matches!( overseer_recv(overseer).await, AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( @@ -2821,9 +3319,9 @@ fn batch_test_round(message_count: usize) { .collect(); let approvals: Vec<_> = validators - .map(|index| IndirectSignedApprovalVote { + .map(|index| IndirectSignedApprovalVoteV2 { block_hash: Hash::zero(), - candidate_index: 0, + candidate_indices: 0u32.into(), validator: ValidatorIndex(index as u32), signature: dummy_signature(), }) @@ -2890,7 +3388,7 @@ fn batch_test_round(message_count: usize) { assert_eq!(peers.len(), 1); for (message_index, approval) in sent_approvals.iter().enumerate() { - assert_eq!(approval, &approvals[approval_index + message_index]); + assert_eq!(approval, &approvals[approval_index + message_index].clone().try_into().unwrap()); } } ); diff --git a/polkadot/node/network/bitfield-distribution/src/lib.rs b/polkadot/node/network/bitfield-distribution/src/lib.rs index 9cc79aee8490..76baf499cad7 100644 --- a/polkadot/node/network/bitfield-distribution/src/lib.rs +++ b/polkadot/node/network/bitfield-distribution/src/lib.rs @@ -32,7 +32,7 @@ use polkadot_node_network_protocol::{ GridNeighbors, RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, }, peer_set::{ProtocolVersion, ValidationVersion}, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, OurView, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ @@ -102,8 +102,8 @@ impl BitfieldGossipMessage { self.relay_parent, self.signed_availability.into(), )), - Some(ValidationVersion::VStaging) => - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Some(ValidationVersion::V3) => + Versioned::V3(protocol_v3::BitfieldDistributionMessage::Bitfield( self.relay_parent, self.signed_availability.into(), )), @@ -503,8 +503,8 @@ async fn relay_message( let v2_interested_peers = filter_by_peer_version(&interested_peers, ValidationVersion::V2.into()); - let vstaging_interested_peers = - filter_by_peer_version(&interested_peers, ValidationVersion::VStaging.into()); + let v3_interested_peers = + filter_by_peer_version(&interested_peers, ValidationVersion::V3.into()); if !v1_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( @@ -522,10 +522,10 @@ async fn relay_message( .await } - if !vstaging_interested_peers.is_empty() { + if !v3_interested_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_interested_peers, - message.into_validation_protocol(ValidationVersion::VStaging.into()), + v3_interested_peers, + message.into_validation_protocol(ValidationVersion::V3.into()), )) .await } @@ -551,7 +551,7 @@ async fn process_incoming_peer_message( relay_parent, bitfield, )) | - Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + Versioned::V3(protocol_v3::BitfieldDistributionMessage::Bitfield( relay_parent, bitfield, )) => (relay_parent, bitfield), diff --git 
a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index a9339a5c443c..2fcf5cec489d 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -33,7 +33,7 @@ use sc_network::{ use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, ProtocolVersion, ValidationVersion}, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, PeerId, }; use polkadot_primitives::{AuthorityDiscoveryId, Block, Hash}; @@ -62,20 +62,20 @@ pub(crate) fn send_validation_message_v1( ); } -// Helper function to send a validation vstaging message to a list of peers. +// Helper function to send a validation v3 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. -pub(crate) fn send_validation_message_vstaging( +pub(crate) fn send_validation_message_v3( peers: Vec, - message: WireMessage, + message: WireMessage, metrics: &Metrics, notification_sinks: &Arc>>>, ) { - gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation vstaging message to peers",); + gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v3 message to peers",); send_message( peers, PeerSet::Validation, - ValidationVersion::VStaging.into(), + ValidationVersion::V3.into(), message, metrics, notification_sinks, diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 40cd167a968b..49d81aea76af 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -37,8 +37,8 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, ObservedRole, OurView, - PeerId, UnifiedReputationChange as Rep, View, + v1 as protocol_v1, v2 as protocol_v2, v3 as protocol_v3, ObservedRole, OurView, PeerId, + UnifiedReputationChange as Rep, View, }; use polkadot_node_subsystem::{ @@ -70,7 +70,7 @@ use super::validator_discovery; /// Defines the `Network` trait with an implementation for an `Arc`. 
use crate::network::{ send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, - send_validation_message_v2, send_validation_message_vstaging, Network, + send_validation_message_v2, send_validation_message_v3, Network, }; use crate::{network::get_peer_id_by_authority_id, WireMessage}; @@ -294,9 +294,9 @@ async fn handle_validation_message( metrics, notification_sinks, ), - ValidationVersion::VStaging => send_validation_message_vstaging( + ValidationVersion::V3 => send_validation_message_v3( vec![peer], - WireMessage::::ViewUpdate(local_view), + WireMessage::::ViewUpdate(local_view), metrics, notification_sinks, ), @@ -360,48 +360,47 @@ async fn handle_validation_message( ?peer, ); - let (events, reports) = - if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V1.into()) { - handle_peer_messages::( - peer, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - vec![notification.into()], - metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V2.into()) - { - handle_peer_messages::( - peer, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - vec![notification.into()], - metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) - { - handle_peer_messages::( - peer, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - vec![notification.into()], - metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Validation], - "Major logic bug. Peer somehow has unsupported validation protocol version." - ); + let (events, reports) = if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::V1.into()) + { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V2.into()) { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V3.into()) { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Validation], + "Major logic bug. Peer somehow has unsupported validation protocol version." + ); - never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + never!( + "Only versions 1 and 2 are supported; peer set connection checked above; qed" + ); - // If a peer somehow triggers this, we'll disconnect them - // eventually. - (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + // If a peer somehow triggers this, we'll disconnect them + // eventually. 
+ (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; for report in reports { network_service.report_peer(peer, report.into()); @@ -980,8 +979,8 @@ fn update_our_view( filter_by_peer_version(&validation_peers, ValidationVersion::V2.into()); let v2_collation_peers = filter_by_peer_version(&collation_peers, CollationVersion::V2.into()); - let vstaging_validation_peers = - filter_by_peer_version(&validation_peers, ValidationVersion::VStaging.into()); + let v3_validation_peers = + filter_by_peer_version(&validation_peers, ValidationVersion::V3.into()); send_validation_message_v1( v1_validation_peers, @@ -1011,8 +1010,8 @@ fn update_our_view( notification_sinks, ); - send_validation_message_vstaging( - vstaging_validation_peers, + send_validation_message_v3( + v3_validation_peers, WireMessage::ViewUpdate(new_view.clone()), metrics, notification_sinks, diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index e0b86feb6442..6847b8a7e24d 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -224,7 +224,7 @@ impl TestNetworkHandle { PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/1")), PeerSet::Collation => Some(ProtocolName::from("/polkadot/collation/1")), }, - ValidationVersion::VStaging => match peer_set { + ValidationVersion::V3 => match peer_set { PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/3")), PeerSet::Collation => unreachable!(), }, @@ -1433,8 +1433,8 @@ fn network_protocol_versioning_view_update() { ValidationVersion::V2 => WireMessage::::ViewUpdate(view.clone()) .encode(), - ValidationVersion::VStaging => - WireMessage::::ViewUpdate(view.clone()) + ValidationVersion::V3 => + WireMessage::::ViewUpdate(view.clone()) .encode(), }; assert_network_actions_contains( @@ -1469,7 +1469,7 @@ fn network_protocol_versioning_subsystem_msg() { NetworkBridgeEvent::PeerConnected( peer, ObservedRole::Full, - ValidationVersion::V2.into(), + ValidationVersion::V3.into(), None, ), &mut virtual_overseer, @@ -1484,9 +1484,9 @@ fn network_protocol_versioning_subsystem_msg() { } let approval_distribution_message = - protocol_v2::ApprovalDistributionMessage::Approvals(Vec::new()); + protocol_v3::ApprovalDistributionMessage::Approvals(Vec::new()); - let msg = protocol_v2::ValidationProtocol::ApprovalDistribution( + let msg = protocol_v3::ValidationProtocol::ApprovalDistribution( approval_distribution_message.clone(), ); @@ -1502,7 +1502,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::ApprovalDistribution( ApprovalDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V3(m)) ) ) => { assert_eq!(p, peer); @@ -1536,7 +1536,7 @@ fn network_protocol_versioning_subsystem_msg() { virtual_overseer.recv().await, AllMessages::StatementDistribution( StatementDistributionMessage::NetworkBridgeUpdate( - NetworkBridgeEvent::PeerMessage(p, Versioned::V2(m)) + NetworkBridgeEvent::PeerMessage(p, Versioned::V3(m)) ) ) => { assert_eq!(p, peer); diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index bdcd1574e335..22802608e1d5 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -41,7 +41,7 @@ use crate::validator_discovery; /// Defines the `Network` trait with an implementation for an `Arc`. 
use crate::network::{ send_collation_message_v1, send_collation_message_v2, send_validation_message_v1, - send_validation_message_v2, send_validation_message_vstaging, Network, + send_validation_message_v2, send_validation_message_v3, Network, }; use crate::metrics::Metrics; @@ -205,7 +205,7 @@ where &metrics, notification_sinks, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V3(msg) => send_validation_message_v3( peers, WireMessage::ProtocolMessage(msg), &metrics, @@ -235,7 +235,7 @@ where &metrics, notification_sinks, ), - Versioned::VStaging(msg) => send_validation_message_vstaging( + Versioned::V3(msg) => send_validation_message_v3( peers, WireMessage::ProtocolMessage(msg), &metrics, @@ -264,7 +264,7 @@ where &metrics, notification_sinks, ), - Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( + Versioned::V2(msg) | Versioned::V3(msg) => send_collation_message_v2( peers, WireMessage::ProtocolMessage(msg), &metrics, @@ -287,7 +287,7 @@ where &metrics, notification_sinks, ), - Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( + Versioned::V2(msg) | Versioned::V3(msg) => send_collation_message_v2( peers, WireMessage::ProtocolMessage(msg), &metrics, diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index b3a396e1be34..8fb0bb215444 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -882,7 +882,7 @@ async fn handle_incoming_peer_message( match msg { Versioned::V1(V1::Declare(..)) | Versioned::V2(V2::Declare(..)) | - Versioned::VStaging(V2::Declare(..)) => { + Versioned::V3(V2::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -895,7 +895,7 @@ async fn handle_incoming_peer_message( }, Versioned::V1(V1::AdvertiseCollation(_)) | Versioned::V2(V2::AdvertiseCollation { .. }) | - Versioned::VStaging(V2::AdvertiseCollation { .. }) => { + Versioned::V3(V2::AdvertiseCollation { .. 
}) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -911,7 +911,7 @@ async fn handle_incoming_peer_message( }, Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | Versioned::V2(V2::CollationSeconded(relay_parent, statement)) | - Versioned::VStaging(V2::CollationSeconded(relay_parent, statement)) => { + Versioned::V3(V2::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index 20b3b9ea1d26..48ad3c711a6d 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -777,7 +777,7 @@ async fn process_incoming_peer_message( match msg { Versioned::V1(V1::Declare(collator_id, para_id, signature)) | Versioned::V2(V2::Declare(collator_id, para_id, signature)) | - Versioned::VStaging(V2::Declare(collator_id, para_id, signature)) => { + Versioned::V3(V2::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation( &mut state.reputation, @@ -894,7 +894,7 @@ async fn process_incoming_peer_message( candidate_hash, parent_head_data_hash, }) | - Versioned::VStaging(V2::AdvertiseCollation { + Versioned::V3(V2::AdvertiseCollation { relay_parent, candidate_hash, parent_head_data_hash, @@ -923,7 +923,7 @@ async fn process_incoming_peer_message( }, Versioned::V1(V1::CollationSeconded(..)) | Versioned::V2(V2::CollationSeconded(..)) | - Versioned::VStaging(V2::CollationSeconded(..)) => { + Versioned::V3(V2::CollationSeconded(..)) => { gum::warn!( target: LOG_TARGET, peer_id = ?origin, diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 0d1b04f2ba23..22417795d5ea 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -477,7 +477,7 @@ where match message { Versioned::V1(m) => match m {}, Versioned::V2(m) => match m {}, - Versioned::VStaging(m) => match m {}, + Versioned::V3(m) => match m {}, } }, } diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index c33b9eae3252..379334ded24a 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -27,6 +27,3 @@ bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" - -[features] -network-protocol-staging = [] diff --git a/polkadot/node/network/protocol/src/grid_topology.rs b/polkadot/node/network/protocol/src/grid_topology.rs index 99dd513c4d79..8bd9adbc17c1 100644 --- a/polkadot/node/network/protocol/src/grid_topology.rs +++ b/polkadot/node/network/protocol/src/grid_topology.rs @@ -73,12 +73,20 @@ pub struct SessionGridTopology { shuffled_indices: Vec, /// The canonical shuffling of validators for the session. canonical_shuffling: Vec, + /// The list of peer-ids in an efficient way to search. + peer_ids: HashSet, } impl SessionGridTopology { /// Create a new session grid topology. 
pub fn new(shuffled_indices: Vec, canonical_shuffling: Vec) -> Self { - SessionGridTopology { shuffled_indices, canonical_shuffling } + let mut peer_ids = HashSet::new(); + for peer_info in canonical_shuffling.iter() { + for peer_id in peer_info.peer_ids.iter() { + peer_ids.insert(*peer_id); + } + } + SessionGridTopology { shuffled_indices, canonical_shuffling, peer_ids } } /// Produces the outgoing routing logic for a particular peer. @@ -111,6 +119,11 @@ impl SessionGridTopology { Some(grid_subset) } + + /// Tells if a given peer id is validator in a session + pub fn is_validator(&self, peer: &PeerId) -> bool { + self.peer_ids.contains(peer) + } } struct MatrixNeighbors { @@ -273,6 +286,11 @@ impl SessionGridTopologyEntry { pub fn get(&self) -> &SessionGridTopology { &self.topology } + + /// Tells if a given peer id is validator in a session + pub fn is_validator(&self, peer: &PeerId) -> bool { + self.topology.is_validator(peer) + } } /// A set of topologies indexed by session @@ -347,6 +365,7 @@ impl Default for SessionBoundGridTopologyStorage { topology: SessionGridTopology { shuffled_indices: Vec::new(), canonical_shuffling: Vec::new(), + peer_ids: Default::default(), }, local_neighbors: GridNeighbors::empty(), }, diff --git a/polkadot/node/network/protocol/src/lib.rs b/polkadot/node/network/protocol/src/lib.rs index 9aeeb98ea9d6..ae72230ee43d 100644 --- a/polkadot/node/network/protocol/src/lib.rs +++ b/polkadot/node/network/protocol/src/lib.rs @@ -253,29 +253,29 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), /// V2 type. V2(V2), - /// VStaging type - VStaging(VStaging), + /// V3 type + V3(V3), } -impl Versioned<&'_ V1, &'_ V2, &'_ VStaging> { +impl Versioned<&'_ V1, &'_ V2, &'_ V3> { /// Convert to a fully-owned version of the message. - pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), Versioned::V2(inner) => Versioned::V2(inner.clone()), - Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), + Versioned::V3(inner) => Versioned::V3(inner.clone()), } } } /// All supported versions of the validation protocol message. pub type VersionedValidationProtocol = - Versioned; + Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -289,9 +289,9 @@ impl From for VersionedValidationProtocol { } } -impl From for VersionedValidationProtocol { - fn from(vstaging: vstaging::ValidationProtocol) -> Self { - VersionedValidationProtocol::VStaging(vstaging) +impl From for VersionedValidationProtocol { + fn from(v3: v3::ValidationProtocol) -> Self { + VersionedValidationProtocol::V3(v3) } } @@ -317,7 +317,7 @@ macro_rules! impl_versioned_full_protocol_from { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), Versioned::V2(x) => Versioned::V2(x.into()), - Versioned::VStaging(x) => Versioned::VStaging(x.into()), + Versioned::V3(x) => Versioned::V3(x.into()), } } } @@ -331,7 +331,7 @@ macro_rules! impl_versioned_try_from { $out:ty, $v1_pat:pat => $v1_out:expr, $v2_pat:pat => $v2_out:expr, - $vstaging_pat:pat => $vstaging_out:expr + $v3_pat:pat => $v3_out:expr ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -341,7 +341,7 @@ macro_rules! 
impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out)), - Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), + Versioned::V3($v3_pat) => Ok(Versioned::V3($v3_out)), _ => Err(crate::WrongVariant), } } @@ -355,8 +355,7 @@ macro_rules! impl_versioned_try_from { match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), Versioned::V2($v2_pat) => Ok(Versioned::V2($v2_out.clone())), - Versioned::VStaging($vstaging_pat) => - Ok(Versioned::VStaging($vstaging_out.clone())), + Versioned::V3($v3_pat) => Ok(Versioned::V3($v3_out.clone())), _ => Err(crate::WrongVariant), } } @@ -368,7 +367,7 @@ macro_rules! impl_versioned_try_from { pub type BitfieldDistributionMessage = Versioned< v1::BitfieldDistributionMessage, v2::BitfieldDistributionMessage, - vstaging::BitfieldDistributionMessage, + v3::BitfieldDistributionMessage, >; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, @@ -380,14 +379,14 @@ impl_versioned_try_from!( BitfieldDistributionMessage, v1::ValidationProtocol::BitfieldDistribution(x) => x, v2::ValidationProtocol::BitfieldDistribution(x) => x, - vstaging::ValidationProtocol::BitfieldDistribution(x) => x + v3::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. pub type StatementDistributionMessage = Versioned< v1::StatementDistributionMessage, v2::StatementDistributionMessage, - vstaging::StatementDistributionMessage, + v3::StatementDistributionMessage, >; impl_versioned_full_protocol_from!( StatementDistributionMessage, @@ -399,14 +398,14 @@ impl_versioned_try_from!( StatementDistributionMessage, v1::ValidationProtocol::StatementDistribution(x) => x, v2::ValidationProtocol::StatementDistribution(x) => x, - vstaging::ValidationProtocol::StatementDistribution(x) => x + v3::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. pub type ApprovalDistributionMessage = Versioned< v1::ApprovalDistributionMessage, v2::ApprovalDistributionMessage, - vstaging::ApprovalDistributionMessage, + v3::ApprovalDistributionMessage, >; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, @@ -418,7 +417,7 @@ impl_versioned_try_from!( ApprovalDistributionMessage, v1::ValidationProtocol::ApprovalDistribution(x) => x, v2::ValidationProtocol::ApprovalDistribution(x) => x, - vstaging::ValidationProtocol::ApprovalDistribution(x) => x + v3::ValidationProtocol::ApprovalDistribution(x) => x ); @@ -426,7 +425,7 @@ impl_versioned_try_from!( pub type GossipSupportNetworkMessage = Versioned< v1::GossipSupportNetworkMessage, v2::GossipSupportNetworkMessage, - vstaging::GossipSupportNetworkMessage, + v3::GossipSupportNetworkMessage, >; // This is a void enum placeholder, so never gets sent over the wire. @@ -871,19 +870,17 @@ pub mod v2 { } } -/// vstaging network protocol types, intended to become v3. -/// Initial purpose is for chaning ApprovalDistributionMessage to -/// include more than one assignment in the message. -pub mod vstaging { +/// v3 network protocol types. +/// Purpose is for chaning ApprovalDistributionMessage to +/// include more than one assignment and approval in a message. 
+pub mod v3 { + use parity_scale_codec::{Decode, Encode}; + - use polkadot_node_primitives::approval::{ - v1::IndirectSignedApprovalVote, - v2::{CandidateBitfield, IndirectAssignmentCertV2}, + use polkadot_node_primitives::approval::v2::{ + CandidateBitfield, IndirectAssignmentCertV2, IndirectSignedApprovalVoteV2, }; - /// This parts of the protocol did not change from v2, so just alias them in vstaging, - /// no reason why they can't be change untill vstaging becomes v3 and is released. + /// These parts of the protocol did not change from v2, so just alias them in v3. pub use super::v2::{ declare_signature_payload, BackedCandidateAcknowledgement, BackedCandidateManifest, BitfieldDistributionMessage, GossipSupportNetworkMessage, StatementDistributionMessage, @@ -903,7 +900,7 @@ pub mod vstaging { Assignments(Vec<(IndirectAssignmentCertV2, CandidateBitfield)>), /// Approvals for candidates in some recent, unfinalized block. #[codec(index = 1)] - Approvals(Vec<IndirectSignedApprovalVote>), + Approvals(Vec<IndirectSignedApprovalVoteV2>), } /// All network messages on the validation peer-set. diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index 7e257d508b5b..cb329607ad61 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -73,7 +73,11 @@ impl PeerSet { // Networking layer relies on `get_main_name()` being the main name of the protocol // for peersets and connection management. let protocol = peerset_protocol_names.get_main_name(self); - let fallback_names = PeerSetProtocolNames::get_fallback_names(self); + let fallback_names = PeerSetProtocolNames::get_fallback_names( + self, + &peerset_protocol_names.genesis_hash, + peerset_protocol_names.fork_id.as_deref(), + ); + let max_notification_size = self.get_max_notification_size(is_authority); match self { @@ -127,15 +131,8 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { - #[cfg(not(feature = "network-protocol-staging"))] match self { - PeerSet::Validation => ValidationVersion::V2.into(), - PeerSet::Collation => CollationVersion::V2.into(), - } - - #[cfg(feature = "network-protocol-staging")] match self { - PeerSet::Validation => ValidationVersion::VStaging.into(), + PeerSet::Validation => ValidationVersion::V3.into(), PeerSet::Collation => CollationVersion::V2.into(), } } @@ -163,7 +160,7 @@ impl PeerSet { Some("validation/1") } else if version == ValidationVersion::V2.into() { Some("validation/2") - } else if version == ValidationVersion::VStaging.into() { + } else if version == ValidationVersion::V3.into() { Some("validation/3") } else { None @@ -236,9 +233,10 @@ pub enum ValidationVersion { V1 = 1, /// The second version. V2 = 2, - /// The staging version to gather changes - /// that before the release become v3. - VStaging = 3, + /// The third version, where changes to ApprovalDistributionMessage have been made. + /// The changes are translatable to the V2 format until assignments v2 and approval + /// coalescing are enabled through a runtime upgrade. + V3 = 3, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. 
@@ -299,6 +297,8 @@ impl From for ProtocolVersion { pub struct PeerSetProtocolNames { protocols: HashMap, names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, + genesis_hash: Hash, + fork_id: Option, } impl PeerSetProtocolNames { @@ -333,7 +333,7 @@ impl PeerSetProtocolNames { } Self::register_legacy_protocol(&mut protocols, protocol); } - Self { protocols, names } + Self { protocols, names, genesis_hash, fork_id: fork_id.map(|fork_id| fork_id.into()) } } /// Helper function to register main protocol. @@ -437,9 +437,30 @@ impl PeerSetProtocolNames { } /// Get the protocol fallback names. Currently only holds the legacy name - /// for `LEGACY_PROTOCOL_VERSION` = 1. - fn get_fallback_names(protocol: PeerSet) -> Vec { - std::iter::once(Self::get_legacy_name(protocol)).collect() + /// for `LEGACY_PROTOCOL_VERSION` = 1 and v2 for validation. + fn get_fallback_names( + protocol: PeerSet, + genesis_hash: &Hash, + fork_id: Option<&str>, + ) -> Vec { + let mut fallbacks = vec![Self::get_legacy_name(protocol)]; + match protocol { + PeerSet::Validation => { + // Fallbacks are tried one by one, till one matches so push v2 at the top, so + // that it is used ahead of the legacy one(v1). + fallbacks.insert( + 0, + Self::generate_name( + genesis_hash, + fork_id, + protocol, + ValidationVersion::V2.into(), + ), + ) + }, + PeerSet::Collation => {}, + }; + fallbacks } } diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs index d9866af1ee23..93f97fe1dd6e 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -22,8 +22,8 @@ use polkadot_node_network_protocol::{ grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, - v2 as protocol_v2, vstaging as protocol_vstaging, IfDisconnected, PeerId, - UnifiedReputationChange as Rep, Versioned, View, + v2 as protocol_v2, v3 as protocol_v3, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -1075,7 +1075,7 @@ async fn circulate_statement<'a, Context>( }) .partition::, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, - ValidationVersion::V2 | ValidationVersion::VStaging => false, + ValidationVersion::V2 | ValidationVersion::V3 => false, }); // partition is handy here but not if we add more protocol versions let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics); @@ -1108,8 +1108,7 @@ async fn circulate_statement<'a, Context>( .collect(); let v2_peers_to_send = filter_by_peer_version(&peers_to_send, ValidationVersion::V2.into()); - let vstaging_to_send = - filter_by_peer_version(&peers_to_send, ValidationVersion::VStaging.into()); + let v3_to_send = filter_by_peer_version(&peers_to_send, ValidationVersion::V3.into()); if !v2_peers_to_send.is_empty() { gum::trace!( @@ -1126,17 +1125,17 @@ async fn circulate_statement<'a, Context>( .await; } - if !vstaging_to_send.is_empty() { + if !v3_to_send.is_empty() { gum::trace!( target: LOG_TARGET, - ?vstaging_to_send, + ?v3_to_send, ?relay_parent, statement = ?stored.statement, - "Sending statement to vstaging peers", + "Sending statement to v3 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - 
vstaging_to_send, - compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), + v3_to_send, + compatible_v1_message(ValidationVersion::V3, payload.clone()).into(), )) .await; } @@ -1472,10 +1471,8 @@ async fn handle_incoming_message<'a, Context>( let message = match message { Versioned::V1(m) => m, Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(m)) | - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility( - m, - )) => m, - Versioned::V2(_) | Versioned::VStaging(_) => { + Versioned::V3(protocol_v3::StatementDistributionMessage::V1Compatibility(m)) => m, + Versioned::V2(_) | Versioned::V3(_) => { // The higher-level subsystem code is supposed to filter out // all non v1 messages. gum::debug!( @@ -2201,8 +2198,7 @@ fn compatible_v1_message( ValidationVersion::V1 => Versioned::V1(message), ValidationVersion::V2 => Versioned::V2(protocol_v2::StatementDistributionMessage::V1Compatibility(message)), - ValidationVersion::VStaging => Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), - ), + ValidationVersion::V3 => + Versioned::V3(protocol_v3::StatementDistributionMessage::V1Compatibility(message)), } } diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs index ef1fc7cd78b5..a1ba1137b5ac 100644 --- a/polkadot/node/network/statement-distribution/src/lib.rs +++ b/polkadot/node/network/statement-distribution/src/lib.rs @@ -27,7 +27,7 @@ use std::time::Duration; use polkadot_node_network_protocol::{ request_response::{v1 as request_v1, v2::AttestedCandidateRequest, IncomingRequestReceiver}, - v2 as protocol_v2, vstaging as protocol_vstaging, Versioned, + v2 as protocol_v2, v3 as protocol_v3, Versioned, }; use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ @@ -400,11 +400,11 @@ impl StatementDistributionSubsystem { Versioned::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) | - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + Versioned::V3( + protocol_v3::StatementDistributionMessage::V1Compatibility(_), ) => VersionTarget::Legacy, Versioned::V1(_) => VersionTarget::Legacy, - Versioned::V2(_) | Versioned::VStaging(_) => VersionTarget::Current, + Versioned::V2(_) | Versioned::V3(_) => VersionTarget::Current, }, _ => VersionTarget::Both, }; diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 406f11305909..2f06d3685b81 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -29,8 +29,7 @@ use polkadot_node_network_protocol::{ MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS, }, v2::{self as protocol_v2, StatementFilter}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + v3 as protocol_v3, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD, @@ -366,7 +365,7 @@ pub(crate) async fn handle_network_update( gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected"); let versioned_protocol = if protocol_version != ValidationVersion::V2.into() && - protocol_version != ValidationVersion::VStaging.into() + protocol_version != ValidationVersion::V3.into() { return 
} else { @@ -432,28 +431,28 @@ pub(crate) async fn handle_network_update( net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::V1Compatibility(_), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::V1Compatibility(_), ) => return, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::Statement(relay_parent, statement), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::Statement(relay_parent, statement), ) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement, reputation) .await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateManifest(inner), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateManifest(inner), ) => handle_incoming_manifest(ctx, state, peer_id, inner, reputation).await, net_protocol::StatementDistributionMessage::V2( protocol_v2::StatementDistributionMessage::BackedCandidateKnown(inner), ) | - net_protocol::StatementDistributionMessage::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner), + net_protocol::StatementDistributionMessage::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateKnown(inner), ) => handle_incoming_acknowledgement(ctx, state, peer_id, inner, reputation).await, }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => @@ -806,13 +805,13 @@ fn pending_statement_network_message( protocol_v2::StatementDistributionMessage::Statement(relay_parent, signed) }) .map(|msg| (vec![peer.0], Versioned::V2(msg).into())), - ValidationVersion::VStaging => statement_store + ValidationVersion::V3 => statement_store .validator_statement(originator, compact) .map(|s| s.as_unchecked().clone()) .map(|signed| { - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + protocol_v3::StatementDistributionMessage::Statement(relay_parent, signed) }) - .map(|msg| (vec![peer.0], Versioned::VStaging(msg).into())), + .map(|msg| (vec![peer.0], Versioned::V3(msg).into())), ValidationVersion::V1 => { gum::error!( target: LOG_TARGET, @@ -945,10 +944,10 @@ async fn send_pending_grid_messages( ) .into(), )), - ValidationVersion::VStaging => messages.push(( + ValidationVersion::V3 => messages.push(( vec![peer_id.0], - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + Versioned::V3( + protocol_v3::StatementDistributionMessage::BackedCandidateManifest( manifest, ), ) @@ -960,7 +959,7 @@ async fn send_pending_grid_messages( "Bug ValidationVersion::V1 should not be used in statement-distribution v2, legacy should have handled this" ); - } + }, }; }, grid::ManifestKind::Acknowledgement => { @@ -1308,8 +1307,8 @@ async fn circulate_statement( let statement_to_v2_peers = filter_by_peer_version(&statement_to_peers, ValidationVersion::V2.into()); - let statement_to_vstaging_peers = - filter_by_peer_version(&statement_to_peers, ValidationVersion::VStaging.into()); + let 
statement_to_v3_peers = + filter_by_peer_version(&statement_to_peers, ValidationVersion::V3.into()); // ship off the network messages to the network bridge. if !statement_to_v2_peers.is_empty() { @@ -1331,17 +1330,17 @@ async fn circulate_statement( .await; } - if !statement_to_vstaging_peers.is_empty() { + if !statement_to_v3_peers.is_empty() { gum::debug!( target: LOG_TARGET, ?compact_statement, n_peers = ?statement_to_peers.len(), - "Sending statement to vstaging peers", + "Sending statement to v3 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - statement_to_vstaging_peers, - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + statement_to_v3_peers, + Versioned::V3(protocol_v3::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), )) @@ -1887,8 +1886,7 @@ async fn provide_candidate_to_grid( } let manifest_peers_v2 = filter_by_peer_version(&manifest_peers, ValidationVersion::V2.into()); - let manifest_peers_vstaging = - filter_by_peer_version(&manifest_peers, ValidationVersion::VStaging.into()); + let manifest_peers_v3 = filter_by_peer_version(&manifest_peers, ValidationVersion::V3.into()); if !manifest_peers_v2.is_empty() { gum::debug!( target: LOG_TARGET, @@ -1908,27 +1906,27 @@ async fn provide_candidate_to_grid( .await; } - if !manifest_peers_vstaging.is_empty() { + if !manifest_peers_v3.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, local_validator = ?per_session.local_validator, - n_peers = manifest_peers_vstaging.len(), - "Sending manifest to vstaging peers" + n_peers = manifest_peers_v3.len(), + "Sending manifest to v3 peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - manifest_peers_vstaging, - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), - ) + manifest_peers_v3, + Versioned::V3(protocol_v3::StatementDistributionMessage::BackedCandidateManifest( + manifest, + )) .into(), )) .await; } let ack_peers_v2 = filter_by_peer_version(&ack_peers, ValidationVersion::V2.into()); - let ack_peers_vstaging = filter_by_peer_version(&ack_peers, ValidationVersion::VStaging.into()); + let ack_peers_v3 = filter_by_peer_version(&ack_peers, ValidationVersion::V3.into()); if !ack_peers_v2.is_empty() { gum::debug!( target: LOG_TARGET, @@ -1948,22 +1946,20 @@ async fn provide_candidate_to_grid( .await; } - if !ack_peers_vstaging.is_empty() { + if !ack_peers_v3.is_empty() { gum::debug!( target: LOG_TARGET, ?candidate_hash, local_validator = ?per_session.local_validator, - n_peers = ack_peers_vstaging.len(), - "Sending acknowledgement to vstaging peers" + n_peers = ack_peers_v3.len(), + "Sending acknowledgement to v3 peers" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - ack_peers_vstaging, - Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown( - acknowledgement, - ), - ) + ack_peers_v3, + Versioned::V3(protocol_v3::StatementDistributionMessage::BackedCandidateKnown( + acknowledgement, + )) .into(), )) .await; @@ -2293,8 +2289,8 @@ fn post_acknowledgement_statement_messages( ) .into(), )), - ValidationVersion::VStaging => messages.push(Versioned::VStaging( - protocol_vstaging::StatementDistributionMessage::Statement( + ValidationVersion::V3 => messages.push(Versioned::V3( + protocol_v3::StatementDistributionMessage::Statement( relay_parent, statement.as_unchecked().clone(), ) @@ -2441,9 +2437,9 @@ fn acknowledgement_and_statement_messages( let mut 
messages = match peer.1 { ValidationVersion::V2 => vec![(vec![peer.0], msg_v2.into())], - ValidationVersion::VStaging => vec![( + ValidationVersion::V3 => vec![( vec![peer.0], - Versioned::VStaging(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( + Versioned::V3(protocol_v2::StatementDistributionMessage::BackedCandidateKnown( acknowledgement, )) .into(), diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 116116659cb1..aa1a473b833f 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -2830,7 +2830,7 @@ fn inactive_local_participates_in_grid() { send_peer_message( &mut overseer, peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + protocol_v3::StatementDistributionMessage::BackedCandidateManifest(manifest), ) .await; diff --git a/polkadot/node/primitives/src/approval.rs b/polkadot/node/primitives/src/approval.rs index cc9136b8ae39..f2a79e025aff 100644 --- a/polkadot/node/primitives/src/approval.rs +++ b/polkadot/node/primitives/src/approval.rs @@ -219,7 +219,9 @@ pub mod v2 { use std::ops::BitOr; use bitvec::{prelude::Lsb0, vec::BitVec}; - use polkadot_primitives::{CandidateIndex, CoreIndex, Hash, ValidatorIndex}; + use polkadot_primitives::{ + CandidateIndex, CoreIndex, Hash, ValidatorIndex, ValidatorSignature, + }; /// A static context associated with producing randomness for a core. pub const CORE_RANDOMNESS_CONTEXT: &[u8] = b"A&V CORE v2"; @@ -473,6 +475,59 @@ pub mod v2 { }) } } + + impl From for IndirectSignedApprovalVoteV2 { + fn from(value: super::v1::IndirectSignedApprovalVote) -> Self { + Self { + block_hash: value.block_hash, + validator: value.validator, + candidate_indices: value.candidate_index.into(), + signature: value.signature, + } + } + } + + /// Errors that can occur when trying to convert to/from approvals v1/v2 + #[derive(Debug)] + pub enum ApprovalConversionError { + /// More than one candidate was signed. + MoreThanOneCandidate(usize), + } + + impl TryFrom for super::v1::IndirectSignedApprovalVote { + type Error = ApprovalConversionError; + + fn try_from(value: IndirectSignedApprovalVoteV2) -> Result { + if value.candidate_indices.count_ones() != 1 { + return Err(ApprovalConversionError::MoreThanOneCandidate( + value.candidate_indices.count_ones(), + )) + } + Ok(Self { + block_hash: value.block_hash, + validator: value.validator, + candidate_index: value.candidate_indices.first_one().expect("Qed we checked above") + as u32, + signature: value.signature, + }) + } + } + + /// A signed approval vote which references the candidate indirectly via the block. + /// + /// In practice, we have a look-up from block hash and candidate index to candidate hash, + /// so this can be transformed into a `SignedApprovalVote`. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct IndirectSignedApprovalVoteV2 { + /// A block hash where the candidate appears. + pub block_hash: Hash, + /// The index of the candidate in the list of candidates fully included as-of the block. + pub candidate_indices: CandidateBitfield, + /// The validator index. + pub validator: ValidatorIndex, + /// The signature by the validator. 
+ pub signature: ValidatorSignature, + } } #[cfg(test)] diff --git a/polkadot/node/primitives/src/disputes/message.rs b/polkadot/node/primitives/src/disputes/message.rs index 89d3ea6c0af9..31fe73a7ba1c 100644 --- a/polkadot/node/primitives/src/disputes/message.rs +++ b/polkadot/node/primitives/src/disputes/message.rs @@ -170,7 +170,7 @@ impl DisputeMessage { let valid_vote = ValidDisputeVote { validator_index: valid_index, signature: valid_statement.validator_signature().clone(), - kind: *valid_kind, + kind: valid_kind.clone(), }; let invalid_vote = InvalidDisputeVote { diff --git a/polkadot/node/primitives/src/disputes/mod.rs b/polkadot/node/primitives/src/disputes/mod.rs index 500b705be957..768b95f65537 100644 --- a/polkadot/node/primitives/src/disputes/mod.rs +++ b/polkadot/node/primitives/src/disputes/mod.rs @@ -46,6 +46,15 @@ pub struct SignedDisputeStatement { session_index: SessionIndex, } +/// Errors encountered while signing a dispute statement +#[derive(Debug)] +pub enum SignedDisputeStatementError { + /// Encountered a keystore error while signing + KeyStoreError(KeystoreError), + /// Could not generate signing payload + PayloadError, +} + /// Tracked votes on candidates, for the purposes of dispute resolution. #[derive(Debug, Clone)] pub struct CandidateVotes { @@ -107,8 +116,9 @@ impl ValidCandidateVotes { ValidDisputeStatementKind::BackingValid(_) | ValidDisputeStatementKind::BackingSeconded(_) => false, ValidDisputeStatementKind::Explicit | - ValidDisputeStatementKind::ApprovalChecking => { - occupied.insert((kind, sig)); + ValidDisputeStatementKind::ApprovalChecking | + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(_) => { + occupied.insert((kind.clone(), sig)); kind != occupied.get().0 }, }, @@ -213,16 +223,19 @@ impl SignedDisputeStatement { candidate_hash: CandidateHash, session_index: SessionIndex, validator_public: ValidatorId, - ) -> Result, KeystoreError> { + ) -> Result, SignedDisputeStatementError> { let dispute_statement = if valid { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) } else { DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) }; - let data = dispute_statement.payload_data(candidate_hash, session_index); + let data = dispute_statement + .payload_data(candidate_hash, session_index) + .map_err(|_| SignedDisputeStatementError::PayloadError)?; let signature = keystore - .sr25519_sign(ValidatorId::ID, validator_public.as_ref(), &data)? + .sr25519_sign(ValidatorId::ID, validator_public.as_ref(), &data) + .map_err(SignedDisputeStatementError::KeyStoreError)? 
.map(|sig| Self { dispute_statement, candidate_hash, diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 448ab605aa92..81eff49ee305 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -225,7 +225,3 @@ runtime-metrics = [ "rococo-runtime?/runtime-metrics", "westend-runtime?/runtime-metrics", ] - -network-protocol-staging = [ - "polkadot-node-network-protocol/network-protocol-staging", -] diff --git a/polkadot/node/service/src/parachains_db/upgrade.rs b/polkadot/node/service/src/parachains_db/upgrade.rs index 1d76c79d3e32..d22eebb5c8d4 100644 --- a/polkadot/node/service/src/parachains_db/upgrade.rs +++ b/polkadot/node/service/src/parachains_db/upgrade.rs @@ -20,10 +20,16 @@ use std::{ fs, io, path::{Path, PathBuf}, str::FromStr, + sync::Arc, }; -use polkadot_node_core_approval_voting::approval_db::v2::{ - migration_helpers::v1_to_v2, Config as ApprovalDbConfig, +use polkadot_node_core_approval_voting::approval_db::{ + common::{Config as ApprovalDbConfig, Result as ApprovalDbResult}, + v2::migration_helpers::v1_to_latest, + v3::migration_helpers::v2_to_latest, +}; +use polkadot_node_subsystem_util::database::{ + kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, Database, }; type Version = u32; @@ -32,7 +38,9 @@ const VERSION_FILE_NAME: &'static str = "parachain_db_version"; /// Current db version. /// Version 4 changes approval db format for `OurAssignment`. -pub(crate) const CURRENT_VERSION: Version = 4; +/// Version 5 changes approval db format to hold some additional +/// information about delayed approvals. +pub(crate) const CURRENT_VERSION: Version = 5; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -101,7 +109,8 @@ pub(crate) fn try_upgrade_db_to_next_version( // 2 -> 3 migration Some(2) => migrate_from_version_2_to_3(db_path, db_kind)?, // 3 -> 4 migration - Some(3) => migrate_from_version_3_to_4(db_path, db_kind)?, + Some(3) => migrate_from_version_3_or_4_to_5(db_path, db_kind, v1_to_latest)?, + Some(4) => migrate_from_version_3_or_4_to_5(db_path, db_kind, v2_to_latest)?, // Already at current version, do nothing. Some(CURRENT_VERSION) => CURRENT_VERSION, // This is an arbitrary future version, we don't handle it. @@ -174,14 +183,19 @@ fn migrate_from_version_1_to_2(path: &Path, db_kind: DatabaseKind) -> Result Result { +fn migrate_from_version_3_or_4_to_5( + path: &Path, + db_kind: DatabaseKind, + migration_function: F, +) -> Result +where + F: Fn(Arc, ApprovalDbConfig) -> ApprovalDbResult<()>, +{ gum::info!(target: LOG_TARGET, "Migrating parachains db from version 3 to version 4 ..."); - use polkadot_node_subsystem_util::database::{ - kvdb_impl::DbAdapter as RocksDbAdapter, paritydb_impl::DbAdapter as ParityDbAdapter, - }; - use std::sync::Arc; let approval_db_config = ApprovalDbConfig { col_approval_data: super::REAL_COLUMNS.col_approval_data }; @@ -194,7 +208,8 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result { let db_path = path @@ -207,7 +222,8 @@ fn migrate_from_version_3_to_4(path: &Path, db_kind: DatabaseKind) -> Result), /// Get the node features. 
NodeFeatures(SessionIndex, RuntimeApiSender), + /// Approval voting params + /// `V10` + ApprovalVotingParams(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -751,6 +755,9 @@ impl RuntimeApiRequest { /// `Node features` pub const NODE_FEATURES_RUNTIME_REQUIREMENT: u32 = 9; + + /// `approval_voting_params` + pub const APPROVAL_VOTING_PARAMS_REQUIREMENT: u32 = 10; } /// A message to the Runtime API subsystem. @@ -936,7 +943,7 @@ pub enum ApprovalVotingMessage { /// protocol. /// /// Should not be sent unless the block hash within the indirect vote is known. - CheckAndImportApproval(IndirectSignedApprovalVote, oneshot::Sender), + CheckAndImportApproval(IndirectSignedApprovalVoteV2, oneshot::Sender), /// Returns the highest possible ancestor hash of the provided block hash which is /// acceptable to vote on finality for. /// The `BlockNumber` provided is the number of the block's ancestor which is the @@ -952,7 +959,7 @@ pub enum ApprovalVotingMessage { /// requires calling into `approval-distribution`: Calls should be infrequent and bounded. GetApprovalSignaturesForCandidate( CandidateHash, - oneshot::Sender>, + oneshot::Sender, ValidatorSignature)>>, ), } @@ -968,7 +975,7 @@ pub enum ApprovalDistributionMessage { /// Distribute an approval vote for the local validator. The approval vote is assumed to be /// valid, relevant, and the corresponding approval already issued. /// If not, the subsystem is free to drop the message. - DistributeApproval(IndirectSignedApprovalVote), + DistributeApproval(IndirectSignedApprovalVoteV2), /// An update from the network bridge. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), @@ -976,7 +983,7 @@ pub enum ApprovalDistributionMessage { /// Get all approval signatures for all chains a candidate appeared in. GetApprovalSignatures( HashSet<(Hash, CandidateIndex)>, - oneshot::Sender>, + oneshot::Sender, ValidatorSignature)>>, ), /// Approval checking lag update measured in blocks. 
ApprovalCheckingLagUpdate(BlockNumber), diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 21df1483b9e6..7f6183076101 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,12 +16,15 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, vstaging, Block, BlockNumber, - CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, - DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + async_backing, + runtime_api::ParachainHost, + slashing, + vstaging::{self, ApprovalVotingParams}, + Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Header, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_client_api::HeaderBackend; use sc_transaction_pool_api::OffchainTransactionPoolFactory; @@ -316,9 +319,16 @@ pub trait RuntimeApiSubsystemClient { async fn disabled_validators(&self, at: Hash) -> Result, ApiError>; // === v9 === - /// Get the node features. async fn node_features(&self, at: Hash) -> Result; + + // == v10: Approval voting params == + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + session_index: SessionIndex, + ) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. @@ -575,4 +585,13 @@ where async fn disabled_validators(&self, at: Hash) -> Result, ApiError> { self.client.runtime_api().disabled_validators(at) } + + /// Approval voting configuration parameters + async fn approval_voting_params( + &self, + at: Hash, + _session_index: SessionIndex, + ) -> Result { + self.client.runtime_api().approval_voting_params(at) + } } diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 8e4022a718fa..29d36452eee6 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -39,6 +39,3 @@ sc-service = { path = "../../../../../substrate/client/service" } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } - -[features] -network-protocol-staging = ["polkadot-cli/network-protocol-staging"] diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 331728b25902..d661005e32ff 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,12 +114,14 @@ //! separated from the stable primitives. 
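Since the `approval_voting_params` runtime API introduced above only exists from runtime API v10 onwards (see `RuntimeApiRequest::APPROVAL_VOTING_PARAMS_REQUIREMENT`), node-side callers have to fall back to a default on older runtimes. A minimal, self-contained sketch of that gating, using stand-in types rather than the real subsystem client; only the version constant `10` and the default `max_approval_coalesce_count = 1` are taken from this diff, the `effective_params` helper is illustrative:

```rust
// Sketch only: stand-in types, not the actual polkadot-node-subsystem API.
// Older runtimes do not expose `approval_voting_params`, so the node must
// fall back to a default that disables coalescing.

/// Stand-in for `primitives::vstaging::ApprovalVotingParams`.
#[derive(Debug, Clone, Copy, PartialEq)]
struct ApprovalVotingParams {
    max_approval_coalesce_count: u32,
}

impl Default for ApprovalVotingParams {
    fn default() -> Self {
        // Coalescing disabled: one candidate per signature.
        Self { max_approval_coalesce_count: 1 }
    }
}

/// Runtime API version required for `approval_voting_params` (v10).
const APPROVAL_VOTING_PARAMS_REQUIREMENT: u32 = 10;

/// Decide which params to use for a block, given the runtime API version
/// advertised at that block and the result of querying the new API
/// (`None` models "request not sent / not supported").
fn effective_params(
    runtime_api_version: u32,
    queried: Option<ApprovalVotingParams>,
) -> ApprovalVotingParams {
    if runtime_api_version >= APPROVAL_VOTING_PARAMS_REQUIREMENT {
        queried.unwrap_or_default()
    } else {
        // Old runtime: behave exactly like the pre-coalescing protocol.
        ApprovalVotingParams::default()
    }
}

fn main() {
    assert_eq!(effective_params(9, None).max_approval_coalesce_count, 1);
    assert_eq!(
        effective_params(10, Some(ApprovalVotingParams { max_approval_coalesce_count: 6 }))
            .max_approval_coalesce_count,
        6
    );
    println!("version gating behaves as expected");
}
```

With the default of 1, behaviour stays identical to today until the parachain host configuration raises `max_approval_coalesce_count`.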
use crate::{ - async_backing, slashing, vstaging, AsyncBackingParams, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, slashing, + vstaging::{self, ApprovalVotingParams}, + AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; + use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -274,5 +276,10 @@ sp_api::decl_runtime_apis! { /// This is a staging method! Do not use on production runtimes! #[api_version(9)] fn node_features() -> vstaging::NodeFeatures; + + /***** Added in v10 *****/ + /// Approval voting configuration parameters + #[api_version(10)] + fn approval_voting_params() -> ApprovalVotingParams; } } diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 83b590dc3203..c3a947644fff 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -1070,6 +1070,26 @@ impl ApprovalVote { } } +/// A vote of approval for multiple candidates. +#[derive(Clone, RuntimeDebug)] +pub struct ApprovalVoteMultipleCandidates<'a>(pub &'a [CandidateHash]); + +impl<'a> ApprovalVoteMultipleCandidates<'a> { + /// Yields the signing payload for this approval vote. + pub fn signing_payload(&self, session_index: SessionIndex) -> Vec { + const MAGIC: [u8; 4] = *b"APPR"; + // Make this backwards compatible with `ApprovalVote` so if we have just one candidate the + // signature will look the same. + // This gives us the nice benefit that old nodes can still check signatures when len is 1 + // and the new node can check the signature coming from old nodes. + if self.0.len() == 1 { + (MAGIC, self.0.first().expect("QED: we just checked"), session_index).encode() + } else { + (MAGIC, &self.0, session_index).encode() + } + } +} + /// Custom validity errors used in Polkadot while validating transactions. #[repr(u8)] pub enum ValidityError { @@ -1246,25 +1266,42 @@ pub enum DisputeStatement { impl DisputeStatement { /// Get the payload data for this type of dispute statement. - pub fn payload_data(&self, candidate_hash: CandidateHash, session: SessionIndex) -> Vec { - match *self { + /// + /// Returns an error if the `candidate_hash` is not included in the list of signed + /// candidates from `ApprovalCheckingMultipleCandidates`.
+ pub fn payload_data( + &self, + candidate_hash: CandidateHash, + session: SessionIndex, + ) -> Result, ()> { + match self { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) => - ExplicitDisputeStatement { valid: true, candidate_hash, session }.signing_payload(), + Ok(ExplicitDisputeStatement { valid: true, candidate_hash, session } + .signing_payload()), DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded( inclusion_parent, - )) => CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { + )) => Ok(CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, - }), + parent_hash: *inclusion_parent, + })), DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)) => - CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { + Ok(CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, - }), + parent_hash: *inclusion_parent, + })), DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) => - ApprovalVote(candidate_hash).signing_payload(session), + Ok(ApprovalVote(candidate_hash).signing_payload(session)), + DisputeStatement::Valid( + ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(candidate_hashes), + ) => + if candidate_hashes.contains(&candidate_hash) { + Ok(ApprovalVoteMultipleCandidates(candidate_hashes).signing_payload(session)) + } else { + Err(()) + }, DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) => - ExplicitDisputeStatement { valid: false, candidate_hash, session }.signing_payload(), + Ok(ExplicitDisputeStatement { valid: false, candidate_hash, session } + .signing_payload()), } } @@ -1276,7 +1313,7 @@ impl DisputeStatement { session: SessionIndex, validator_signature: &ValidatorSignature, ) -> Result<(), ()> { - let payload = self.payload_data(candidate_hash, session); + let payload = self.payload_data(candidate_hash, session)?; if validator_signature.verify(&payload[..], &validator_public) { Ok(()) @@ -1308,13 +1345,14 @@ impl DisputeStatement { Self::Valid(ValidDisputeStatementKind::BackingValid(_)) => true, Self::Valid(ValidDisputeStatementKind::Explicit) | Self::Valid(ValidDisputeStatementKind::ApprovalChecking) | + Self::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates(_)) | Self::Invalid(_) => false, } } } /// Different kinds of statements of validity on a candidate. -#[derive(Encode, Decode, Copy, Clone, PartialEq, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug, TypeInfo)] pub enum ValidDisputeStatementKind { /// An explicit statement issued as part of a dispute. #[codec(index = 0)] @@ -1328,6 +1366,12 @@ pub enum ValidDisputeStatementKind { /// An approval vote from the approval checking phase. #[codec(index = 3)] ApprovalChecking, + /// An approval vote from the new version. + /// We can't create this version untill all nodes + /// have been updated to support it and max_approval_coalesce_count + /// is set to more than 1. + #[codec(index = 4)] + ApprovalCheckingMultipleCandidates(Vec), } /// Different kinds of statements of invalidity on a candidate. diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1639dc15e212..630bcf8679ad 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,6 +17,38 @@ //! Staging Primitives. 
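The `ApprovalVoteMultipleCandidates::signing_payload` added to `v6/mod.rs` above deliberately encodes a single-candidate vote exactly like the legacy `ApprovalVote`, so signatures stay interchangeable with un-upgraded nodes. A small, self-contained sketch of that property follows; it needs the `parity-scale-codec` crate and uses `[u8; 32]`/`u32` stand-ins for `CandidateHash`/`SessionIndex`, so only the shape of the argument carries over, not the exact bytes of the real types:

```rust
// Minimal sketch of the payload-compatibility argument, not the real types.
use parity_scale_codec::Encode;

const MAGIC: [u8; 4] = *b"APPR";

/// Legacy `ApprovalVote`-style payload: a single candidate hash.
fn single_payload(candidate: [u8; 32], session: u32) -> Vec<u8> {
    (MAGIC, candidate, session).encode()
}

/// Coalesced payload, mirroring `ApprovalVoteMultipleCandidates::signing_payload`.
fn coalesced_payload(candidates: &[[u8; 32]], session: u32) -> Vec<u8> {
    if candidates.len() == 1 {
        // Encode the hash directly so the bytes match the legacy payload.
        (MAGIC, candidates[0], session).encode()
    } else {
        // Slices are encoded with a compact length prefix, so this payload
        // cannot collide with a single-candidate one.
        (MAGIC, candidates, session).encode()
    }
}

fn main() {
    let a = [1u8; 32];
    let b = [2u8; 32];
    // One candidate: old and new nodes produce and verify identical bytes.
    assert_eq!(single_payload(a, 7), coalesced_payload(&[a], 7));
    // Two candidates: a genuinely new payload format.
    assert_ne!(single_payload(a, 7), coalesced_payload(&[a, b], 7));
    println!("single-candidate payloads are byte-identical");
}
```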
// Put any primitives used by staging APIs functions here +pub use crate::v6::*; +use sp_std::prelude::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +/// Approval voting configuration parameters +#[derive( + RuntimeDebug, + Copy, + Clone, + PartialEq, + Encode, + Decode, + TypeInfo, + serde::Serialize, + serde::Deserialize, +)] +pub struct ApprovalVotingParams { + /// The maximum number of candidates `approval-voting` can vote for with + /// a single signature. + /// + /// Setting it to 1 means we send the approval as soon as it is available. + pub max_approval_coalesce_count: u32, +} + +impl Default for ApprovalVotingParams { + fn default() -> Self { + Self { max_approval_coalesce_count: 1 } + } +} use bitvec::vec::BitVec; diff --git a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md index 1a17f90d9ba3..345b3d2e6970 100644 --- a/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/polkadot/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -4,10 +4,13 @@ Reading the [section on the approval protocol](../../protocol-approval.md) will aims of this subsystem. Approval votes are split into two parts: Assignments and Approvals. Validators first broadcast their assignment to -indicate intent to check a candidate. Upon successfully checking, they broadcast an approval vote. If a validator -doesn't broadcast their approval vote shortly after issuing an assignment, this is an indication that they are being -prevented from recovering or validating the block data and that more validators should self-select to check the -candidate. This is known as a "no-show". +indicate intent to check a candidate. Upon successfully checking, they don't immediately send the vote; instead, +they queue the check for a short period of time `MAX_APPROVAL_COALESCE_WAIT_TICKS` to give the +validator the opportunity to vote for more than one candidate. Once `MAX_APPROVAL_COALESCE_WAIT_TICKS` have passed or at least +`MAX_APPROVAL_COALESCE_COUNT` candidates are ready, they broadcast an approval vote for all candidates. If a validator +doesn't broadcast their approval vote shortly after issuing an assignment, this is an indication that they are +being prevented from recovering or validating the block data and that more validators should self-select to +check the candidate. This is known as a "no-show". The core of this subsystem is a Tick-based timer loop, where Ticks are 500ms. We also reason about time in terms of `DelayTranche`s, which measure the number of ticks elapsed since a block was produced. We track metadata for all @@ -120,6 +123,13 @@ struct BlockEntry { // this block. The block can be considered approved has all bits set to 1 approved_bitfield: Bitfield, children: Vec, + // A list of candidates we have checked, but did not sign and + // advertise the vote for yet. + candidates_pending_signature: BTreeMap, + // Assignments we already distributed. A 1 bit means the candidate index for which + // we already have sent out an assignment. We need this to avoid distributing + // multiple core assignments more than once. + distributed_assignments: Bitfield, } // slot_duration * 2 + DelayTranche gives the number of delay tranches since the @@ -303,12 +313,12 @@ entry.
The cert itself contains information necessary to determine the candidate On receiving a `CheckAndImportApproval(indirect_approval_vote, response_channel)` message: * Fetch the `BlockEntry` from the indirect approval vote's `block_hash`. If none, return `ApprovalCheckResult::Bad`. - * Fetch the `CandidateEntry` from the indirect approval vote's `candidate_index`. If the block did not trigger + * Fetch all `CandidateEntry` from the indirect approval vote's `candidate_indices`. If the block did not trigger inclusion of enough candidates, return `ApprovalCheckResult::Bad`. - * Construct a `SignedApprovalVote` using the candidate hash and check against the validator's approval key, based on - the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`. + * Construct a `SignedApprovalVote` using the candidate hashes and check against the validator's approval key, + based on the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`. * Send `ApprovalCheckResult::Accepted` - * [Import the checked approval vote](#import-checked-approval) + * [Import the checked approval vote](#import-checked-approval) for all candidates #### `ApprovalVotingMessage::ApprovedAncestor` @@ -402,10 +412,25 @@ On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`: #### Issue Approval Vote * Fetch the block entry and candidate entry. Ignore if `None` - we've probably just lost a race with finality. - * Construct a `SignedApprovalVote` with the validator index for the session. * [Import the checked approval vote](#import-checked-approval). It is "checked" as we've just issued the signature. - * Construct a `IndirectSignedApprovalVote` using the information about the vote. - * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * IF `MAX_APPROVAL_COALESCE_COUNT` candidates are in the waiting queue + * Construct a `SignedApprovalVote` with the validator index for the session and all candidate hashes in the waiting queue. + * Construct an `IndirectSignedApprovalVote` using the information about the vote. + * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * ELSE + * Queue the candidate in the `BlockEntry::candidates_pending_signature` + * Arm a per-`BlockEntry` timer with the latest tick at which we can send the vote. + +### Delayed vote distribution + * [Issue Approval Vote](#issue-approval-vote) arms a per-block timer once if there is no requirement to send the + vote immediately. + * When the timer wakes up it will either: + * IF there is a candidate in the queue past its sending tick: + * Construct a `SignedApprovalVote` with the validator index for the session and all candidate hashes in the waiting queue. + * Construct an `IndirectSignedApprovalVote` using the information about the vote. + * Dispatch `ApprovalDistributionMessage::DistributeApproval`. + * ELSE + * Re-arm the timer with the latest tick at which we have to send the vote. ### Determining Approval of Candidate diff --git a/polkadot/roadmap/implementers-guide/src/protocol-approval.md b/polkadot/roadmap/implementers-guide/src/protocol-approval.md index 70bc0233d65a..b6aa16646ad2 100644 --- a/polkadot/roadmap/implementers-guide/src/protocol-approval.md +++ b/polkadot/roadmap/implementers-guide/src/protocol-approval.md @@ -296,6 +296,18 @@ provide somewhat more security. TODO: When? Is this optimal for the network? etc.
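The "Issue Approval Vote" and "Delayed vote distribution" steps above boil down to one decision: sign now or keep waiting. Below is a minimal sketch of that rule with illustrative names (`PendingApproval`, `should_sign_now`) and an assumed value for `MAX_APPROVAL_COALESCE_WAIT_TICKS`; the real subsystem keeps this state in `BlockEntry` and also honours the no-show guard described in the "Approval coalescing" section that follows:

```rust
// A minimal, self-contained sketch of the signing decision described above.
// Names mirror the guide; the real subsystem works with its own `Tick` type.

type Tick = u64;

/// Assumed wait bound; the real value is a protocol constant, not shown here.
const MAX_APPROVAL_COALESCE_WAIT_TICKS: Tick = 12;

struct PendingApproval {
    /// Tick at which the candidate finished checking and became signable.
    ready_at: Tick,
    /// Latest tick we may sign without risking an accidental no-show.
    sign_no_later_than: Tick,
}

/// Returns true if the queued approvals should be signed and broadcast now.
fn should_sign_now(
    pending: &[PendingApproval],
    max_approval_coalesce_count: u32,
    now: Tick,
) -> bool {
    if pending.is_empty() {
        return false
    }
    // Enough candidates gathered to fill a coalesced vote.
    if pending.len() as u32 >= max_approval_coalesce_count {
        return true
    }
    // The oldest candidate waited long enough, or a candidate is about to
    // run into its no-show deadline.
    pending.iter().any(|p| {
        now >= p.ready_at + MAX_APPROVAL_COALESCE_WAIT_TICKS || now >= p.sign_no_later_than
    })
}

fn main() {
    let pending = vec![
        PendingApproval { ready_at: 100, sign_no_later_than: 140 },
        PendingApproval { ready_at: 104, sign_no_later_than: 150 },
    ];
    // Not enough candidates and the oldest has waited only 5 ticks.
    assert!(!should_sign_now(&pending, 6, 105));
    // The oldest candidate has now waited 12 ticks: sign and coalesce.
    assert!(should_sign_now(&pending, 6, 112));
    // With a coalesce count of 1 we sign immediately, as before this change.
    assert!(should_sign_now(&pending, 1, 100));
}
```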
+## Approval coalescing +To reduce the necessary network bandwidth and CPU time when a validator has more than one candidate to approve, we are +doing our best to send a single message that approves all available candidates with a single signature. +The implemented heuristic is that each time we are ready to create a signature and send a vote for a candidate, we +delay sending it until one of three things happens: +- We have gathered a maximum of `MAX_APPROVAL_COALESCE_COUNT` candidates that we have already checked and we are + ready to sign approval for. +- `MAX_APPROVAL_COALESCE_WAIT_TICKS` have passed since checking the oldest candidate and we were ready to sign + and send the approval message. +- We are already in the last third of the no-show period in order to avoid creating accidental no-shows, which in + turn might trigger other assignments. + ## On-chain verification We should verify approval on-chain to reward approval checkers. We therefore require the "no show" timeout to be longer diff --git a/polkadot/runtime/parachains/src/builder.rs b/polkadot/runtime/parachains/src/builder.rs index dced24df0aec..23916bbdc8ad 100644 --- a/polkadot/runtime/parachains/src/builder.rs +++ b/polkadot/runtime/parachains/src/builder.rs @@ -636,7 +636,7 @@ impl BenchBuilder { } else { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) }; - let data = dispute_statement.payload_data(candidate_hash, session); + let data = dispute_statement.payload_data(candidate_hash, session).unwrap(); let statement_sig = validator_public.sign(&data).unwrap(); (dispute_statement, ValidatorIndex(validator_index), statement_sig) diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index bff9cc34b4fb..272c227dfefe 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,8 +26,9 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, - SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; @@ -263,6 +264,8 @@ pub struct HostConfiguration { pub minimum_backing_votes: u32, /// Node features enablement. pub node_features: NodeFeatures, + /// Params used by approval-voting + pub approval_voting_params: ApprovalVotingParams, } impl> Default for HostConfiguration { @@ -308,6 +311,7 @@ impl> Default for HostConfiguration /// v8-v9: /// v9-v10: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(10); + /// v10-11: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(11); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1191,6 +1196,7 @@ pub mod pallet { config.on_demand_ttl = new; }) } + /// Set the minimum backing votes threshold. #[pallet::call_index(52)] #[pallet::weight(( @@ -1203,6 +1209,7 @@ pub mod pallet { config.minimum_backing_votes = new; }) } + /// Set/Unset a node feature. #[pallet::call_index(53)] #[pallet::weight(( @@ -1220,6 +1227,22 @@ pub mod pallet { config.node_features.set(index, value); }) } + + /// Set approval-voting-params.
+ #[pallet::call_index(54)] + #[pallet::weight(( + T::WeightInfo::set_config_with_executor_params(), + DispatchClass::Operational, + ))] + pub fn set_approval_voting_params( + origin: OriginFor, + new: ApprovalVotingParams, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.approval_voting_params = new; + }) + } } #[pallet::hooks] diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index db323d3aad93..2838b73092db 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -17,6 +17,7 @@ //! A module that is responsible for migration of storage. pub mod v10; +pub mod v11; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs index 3c934082dc1e..cf228610e5c9 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v10.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -16,17 +16,121 @@ //! A module that is responsible for migration of storage. -use crate::configuration::{self, Config, Pallet}; +use crate::configuration::{Config, Pallet}; use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{vstaging::NodeFeatures, SessionIndex}; +use primitives::{ + vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParams, SessionIndex, + LEGACY_MIN_BACKING_VOTES, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; +use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v9::V9HostConfiguration; +// All configuration of the runtime with respect to paras. 
+#[derive(Clone, Encode, PartialEq, Decode, Debug)] +pub struct V10HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, + pub node_features: NodeFeatures, +} -type V10HostConfiguration = configuration::HostConfiguration; +impl> Default for V10HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + 
minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + node_features: NodeFeatures::EMPTY, + } + } +} mod v9 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v11.rs b/polkadot/runtime/parachains/src/configuration/migration/v11.rs new file mode 100644 index 000000000000..b7dec7070f93 --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v11.rs @@ -0,0 +1,329 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use crate::configuration::{self, Config, Pallet}; +use frame_support::{ + migrations::VersionedMigration, pallet_prelude::*, traits::Defensive, weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{vstaging::ApprovalVotingParams, SessionIndex}; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v10::V10HostConfiguration; +type V11HostConfiguration = configuration::HostConfiguration; + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +mod v11 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V11HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V11HostConfiguration>)>, + OptionQuery, + >; +} + +pub type MigrateToV11 = VersionedMigration< + 10, + 11, + UncheckedMigrateToV11, + Pallet, + ::DbWeight, +>; + +pub struct UncheckedMigrateToV11(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for UncheckedMigrateToV11 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV11"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV11 started"); + let weight_consumed = migrate_to_v11::(); + + log::info!(target: configuration::LOG_TARGET, "HostConfiguration MigrateToV11 executed successfully"); + + weight_consumed + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running 
post_upgrade() for HostConfiguration MigrateToV11"); + ensure!( + StorageVersion::get::>() >= 11, + "Storage version should be >= 11 after the migration" + ); + + Ok(()) + } +} + +fn migrate_to_v11() -> Weight { + // Unusual formatting is justified: + // - make it easier to verify that fields assign what they supposed to assign. + // - this code is transient and will be removed after all migrations are done. + // - this code is important enough to optimize for legibility sacrificing consistency. + #[rustfmt::skip] + let translate = + |pre: V10HostConfiguration>| -> + V11HostConfiguration> + { + V11HostConfiguration { +max_code_size : pre.max_code_size, +max_head_data_size : pre.max_head_data_size, +max_upward_queue_count : pre.max_upward_queue_count, +max_upward_queue_size : pre.max_upward_queue_size, +max_upward_message_size : pre.max_upward_message_size, +max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, +hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, +validation_upgrade_cooldown : pre.validation_upgrade_cooldown, +validation_upgrade_delay : pre.validation_upgrade_delay, +max_pov_size : pre.max_pov_size, +max_downward_message_size : pre.max_downward_message_size, +hrmp_sender_deposit : pre.hrmp_sender_deposit, +hrmp_recipient_deposit : pre.hrmp_recipient_deposit, +hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, +hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, +hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, +hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, +hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, +code_retention_period : pre.code_retention_period, +on_demand_cores : pre.on_demand_cores, +on_demand_retries : pre.on_demand_retries, +group_rotation_frequency : pre.group_rotation_frequency, +paras_availability_period : pre.paras_availability_period, +scheduling_lookahead : pre.scheduling_lookahead, +max_validators_per_core : pre.max_validators_per_core, +max_validators : pre.max_validators, +dispute_period : pre.dispute_period, +dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, +no_show_slots : pre.no_show_slots, +n_delay_tranches : pre.n_delay_tranches, +zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, +needed_approvals : pre.needed_approvals, +relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, +pvf_voting_ttl : pre.pvf_voting_ttl, +minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, +async_backing_params : pre.async_backing_params, +executor_params : pre.executor_params, +on_demand_queue_max_size : pre.on_demand_queue_max_size, +on_demand_base_fee : pre.on_demand_base_fee, +on_demand_fee_variability : pre.on_demand_fee_variability, +on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, +on_demand_ttl : pre.on_demand_ttl, +minimum_backing_votes : pre.minimum_backing_votes, +node_features : pre.node_features, +approval_voting_params : ApprovalVotingParams { + max_approval_coalesce_count: 1, + } + } + }; + + let v10 = v10::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v11 = translate(v10); + v11::ActiveConfig::::set(Some(v11)); + + // Allowed to be empty. 
+ let pending_v9 = v10::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v10) in pending_v9.into_iter() { + let v11 = translate(v10); + pending_v10.push((session, v11)); + } + v11::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use primitives::LEGACY_MIN_BACKING_VOTES; + + use super::*; + use crate::mock::{new_test_ext, Test}; + + #[test] + fn v11_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c9018096980000000000000000000000000005000000140000000400000001000000010100000000060000006400000002000000190000000000000002000000020000000200000005000000020000000001000000" + ]; + + let v11 = + V11HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v11.max_code_size, 3_145_728); + assert_eq!(v11.validation_upgrade_cooldown, 2); + assert_eq!(v11.max_pov_size, 5_242_880); + assert_eq!(v11.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v11.n_delay_tranches, 25); + assert_eq!(v11.minimum_validation_upgrade_delay, 5); + assert_eq!(v11.group_rotation_frequency, 20); + assert_eq!(v11.on_demand_cores, 0); + assert_eq!(v11.on_demand_base_fee, 10_000_000); + assert_eq!(v11.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v11.approval_voting_params.max_approval_coalesce_count, 1); + } + + #[test] + fn test_migrate_to_v11() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. 
+ let v10 = V10HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v10.clone())); + pending_configs.push((300, v10.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. + v10::ActiveConfig::::set(Some(v10)); + v10::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v11::(); + + let v11 = v11::ActiveConfig::::get().unwrap(); + assert_eq!(v11.approval_voting_params.max_approval_coalesce_count, 1); + + let mut configs_to_check = v11::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v11.clone())); + + for (_, v10) in configs_to_check { + #[rustfmt::skip] + { + assert_eq!(v10.max_code_size , v11.max_code_size); + assert_eq!(v10.max_head_data_size , v11.max_head_data_size); + assert_eq!(v10.max_upward_queue_count , v11.max_upward_queue_count); + assert_eq!(v10.max_upward_queue_size , v11.max_upward_queue_size); + assert_eq!(v10.max_upward_message_size , v11.max_upward_message_size); + assert_eq!(v10.max_upward_message_num_per_candidate , v11.max_upward_message_num_per_candidate); + assert_eq!(v10.hrmp_max_message_num_per_candidate , v11.hrmp_max_message_num_per_candidate); + assert_eq!(v10.validation_upgrade_cooldown , v11.validation_upgrade_cooldown); + assert_eq!(v10.validation_upgrade_delay , v11.validation_upgrade_delay); + assert_eq!(v10.max_pov_size , v11.max_pov_size); + assert_eq!(v10.max_downward_message_size , v11.max_downward_message_size); + assert_eq!(v10.hrmp_max_parachain_outbound_channels , v11.hrmp_max_parachain_outbound_channels); + assert_eq!(v10.hrmp_sender_deposit , v11.hrmp_sender_deposit); + assert_eq!(v10.hrmp_recipient_deposit , v11.hrmp_recipient_deposit); + assert_eq!(v10.hrmp_channel_max_capacity , v11.hrmp_channel_max_capacity); + assert_eq!(v10.hrmp_channel_max_total_size , v11.hrmp_channel_max_total_size); + assert_eq!(v10.hrmp_max_parachain_inbound_channels , v11.hrmp_max_parachain_inbound_channels); + assert_eq!(v10.hrmp_channel_max_message_size , v11.hrmp_channel_max_message_size); + assert_eq!(v10.code_retention_period , v11.code_retention_period); + assert_eq!(v10.on_demand_cores , v11.on_demand_cores); + assert_eq!(v10.on_demand_retries , v11.on_demand_retries); + assert_eq!(v10.group_rotation_frequency , v11.group_rotation_frequency); + assert_eq!(v10.paras_availability_period , v11.paras_availability_period); + assert_eq!(v10.scheduling_lookahead , v11.scheduling_lookahead); + assert_eq!(v10.max_validators_per_core , v11.max_validators_per_core); + assert_eq!(v10.max_validators , v11.max_validators); + assert_eq!(v10.dispute_period , v11.dispute_period); + assert_eq!(v10.no_show_slots , v11.no_show_slots); + assert_eq!(v10.n_delay_tranches , v11.n_delay_tranches); + assert_eq!(v10.zeroth_delay_tranche_width , v11.zeroth_delay_tranche_width); + assert_eq!(v10.needed_approvals , v11.needed_approvals); + assert_eq!(v10.relay_vrf_modulo_samples , v11.relay_vrf_modulo_samples); + assert_eq!(v10.pvf_voting_ttl , v11.pvf_voting_ttl); + assert_eq!(v10.minimum_validation_upgrade_delay , v11.minimum_validation_upgrade_delay); + assert_eq!(v10.async_backing_params.allowed_ancestry_len, v11.async_backing_params.allowed_ancestry_len); + assert_eq!(v10.async_backing_params.max_candidate_depth , v11.async_backing_params.max_candidate_depth); + assert_eq!(v10.executor_params , 
v11.executor_params); + assert_eq!(v10.minimum_backing_votes , v11.minimum_backing_votes); + }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. + } + }); + } + + // Test that migration doesn't panic in case there're no pending configurations upgrades in + // pallet's storage. + #[test] + fn test_migrate_to_v11_no_pending() { + let v10 = V10HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v10 version in the state. + v10::ActiveConfig::::set(Some(v10)); + // Ensure there're no pending configs. + v11::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v11::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v8.rs b/polkadot/runtime/parachains/src/configuration/migration/v8.rs index d1bc90051125..537dfa9abd77 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v8.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v8.rs @@ -250,7 +250,7 @@ on_demand_fee_variability : Perbill::from_percent(3), on_demand_target_queue_utilization : Perbill::from_percent(25), on_demand_ttl : 5u32.into(), } - }; +}; let v7 = v7::ActiveConfig::::get() .defensive_proof("Could not decode old config") diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index b62a45355e1d..d88572d3b558 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -313,6 +313,7 @@ fn setting_pending_config_members() { pvf_voting_ttl: 3, minimum_validation_upgrade_delay: 20, executor_params: Default::default(), + approval_voting_params: ApprovalVotingParams { max_approval_coalesce_count: 1 }, on_demand_queue_max_size: 10_000u32, on_demand_base_fee: 10_000_000u128, on_demand_fee_variability: Perbill::from_percent(3), diff --git a/polkadot/runtime/parachains/src/disputes.rs b/polkadot/runtime/parachains/src/disputes.rs index cf2e99e7359a..c2383dad3053 100644 --- a/polkadot/runtime/parachains/src/disputes.rs +++ b/polkadot/runtime/parachains/src/disputes.rs @@ -25,11 +25,11 @@ use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; use polkadot_runtime_metrics::get_current_time; use primitives::{ - byzantine_threshold, supermajority_threshold, ApprovalVote, CandidateHash, - CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CompactStatement, ConsensusLog, - DisputeState, DisputeStatement, DisputeStatementSet, ExplicitDisputeStatement, - InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, SigningContext, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + byzantine_threshold, supermajority_threshold, vstaging::ApprovalVoteMultipleCandidates, + ApprovalVote, CandidateHash, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, + CompactStatement, ConsensusLog, DisputeState, DisputeStatement, DisputeStatementSet, + ExplicitDisputeStatement, InvalidDisputeStatementKind, MultiDisputeStatementSet, SessionIndex, + SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -952,6 +952,8 @@ impl Pallet { None => return StatementSetFilter::RemoveAll, }; + let config = >::config(); + let n_validators = session_info.validators.len(); // Check for ancient. 
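The `config` read in the hunk above is consumed in the next hunk, where `check_signature` gains an extra boolean so that the new `ApprovalCheckingMultipleCandidates` statements are only honoured once coalescing is actually enabled on-chain. Written as a stand-alone predicate (a sketch with a stand-in hash type, not the code from this diff), the acceptance rule is:

// Sketch of the rule added below: reject the new statement kind while the
// feature is still switched off (max_approval_coalesce_count <= 1), and even
// when it is on, only accept the signature if the disputed candidate is part
// of the signed set.
type CandidateHash = [u8; 32]; // stand-in for primitives::CandidateHash

fn multi_candidate_vote_acceptable(
    coalescing_enabled: bool, // config.approval_voting_params.max_approval_coalesce_count > 1
    signed_candidates: &[CandidateHash],
    disputed_candidate: &CandidateHash,
) -> bool {
    coalescing_enabled && signed_candidates.contains(disputed_candidate)
}

When the rule holds, the payload checked against the signature is `ApprovalVoteMultipleCandidates(candidates).signing_payload(session)` rather than the single-candidate `ApprovalVote` payload.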
@@ -1015,7 +1017,14 @@ impl Pallet { set.session, statement, signature, + // This is here to prevent malicious nodes of generating + // `ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates` before that + // is enabled, via setting `max_approval_coalesce_count` in the parachain host + // config. + config.approval_voting_params.max_approval_coalesce_count > 1, ) { + log::warn!("Failed to check dispute signature"); + importer.undo(undo); filter.remove_index(i); continue @@ -1260,22 +1269,31 @@ fn check_signature( session: SessionIndex, statement: &DisputeStatement, validator_signature: &ValidatorSignature, + approval_multiple_candidates_enabled: bool, ) -> Result<(), ()> { - let payload = match *statement { + let payload = match statement { DisputeStatement::Valid(ValidDisputeStatementKind::Explicit) => ExplicitDisputeStatement { valid: true, candidate_hash, session }.signing_payload(), DisputeStatement::Valid(ValidDisputeStatementKind::BackingSeconded(inclusion_parent)) => CompactStatement::Seconded(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, + parent_hash: *inclusion_parent, }), DisputeStatement::Valid(ValidDisputeStatementKind::BackingValid(inclusion_parent)) => CompactStatement::Valid(candidate_hash).signing_payload(&SigningContext { session_index: session, - parent_hash: inclusion_parent, + parent_hash: *inclusion_parent, }), DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking) => ApprovalVote(candidate_hash).signing_payload(session), + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalCheckingMultipleCandidates( + candidates, + )) => + if approval_multiple_candidates_enabled && candidates.contains(&candidate_hash) { + ApprovalVoteMultipleCandidates(candidates).signing_payload(session) + } else { + return Err(()) + }, DisputeStatement::Invalid(InvalidDisputeStatementKind::Explicit) => ExplicitDisputeStatement { valid: false, candidate_hash, session }.signing_payload(), }; diff --git a/polkadot/runtime/parachains/src/disputes/tests.rs b/polkadot/runtime/parachains/src/disputes/tests.rs index 0757084084f6..1f3f00132d68 100644 --- a/polkadot/runtime/parachains/src/disputes/tests.rs +++ b/polkadot/runtime/parachains/src/disputes/tests.rs @@ -1500,7 +1500,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true, ) .is_ok()); assert!(check_signature( @@ -1508,7 +1509,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1516,7 +1518,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_1, - &signed_1 + &signed_1, + true, ) .is_err()); assert!(check_signature( @@ -1524,7 +1527,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_1, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1532,7 +1536,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_1 + &signed_1, + true, ) .is_err()); assert!(check_signature( @@ -1540,7 +1545,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1548,7 +1554,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_1 + &signed_1, + true ) .is_err()); assert!(check_signature( @@ -1556,7 +1563,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_1 + &signed_1, + true, ) .is_err()); @@ -1565,7 
+1573,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_2 + &signed_2, + true, ) .is_ok()); assert!(check_signature( @@ -1573,7 +1582,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1581,7 +1591,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_2, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1589,7 +1600,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_2, - &signed_2 + &signed_2, + true ) .is_err()); assert!(check_signature( @@ -1597,7 +1609,8 @@ fn test_check_signature() { candidate_hash, session, &wrong_statement_2, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1605,7 +1618,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1613,7 +1627,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1621,7 +1636,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_2 + &signed_2, + true, ) .is_err()); assert!(check_signature( @@ -1629,7 +1645,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_2 + &signed_2, + true, ) .is_err()); @@ -1638,7 +1655,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_ok()); assert!(check_signature( @@ -1646,7 +1664,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1654,7 +1673,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1662,7 +1682,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1670,7 +1691,8 @@ fn test_check_signature() { candidate_hash, session, &wrong_statement_3, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1678,7 +1700,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1686,7 +1709,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_3 + &signed_3, + true ) .is_err()); assert!(check_signature( @@ -1694,7 +1718,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_3 + &signed_3, + true, ) .is_err()); assert!(check_signature( @@ -1702,7 +1727,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_3 + &signed_3, + true, ) .is_err()); @@ -1711,7 +1737,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_ok()); assert!(check_signature( @@ -1719,7 +1746,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1727,7 +1755,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1735,7 +1764,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_4, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1743,7 +1773,8 @@ fn 
test_check_signature() { candidate_hash, session, &statement_1, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1751,7 +1782,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1759,7 +1791,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_4 + &signed_4, + true, ) .is_err()); assert!(check_signature( @@ -1767,7 +1800,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_4 + &signed_4, + true, ) .is_err()); @@ -1776,7 +1810,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_ok()); assert!(check_signature( @@ -1784,7 +1819,8 @@ fn test_check_signature() { candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1792,7 +1828,8 @@ fn test_check_signature() { wrong_candidate_hash, session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1800,7 +1837,8 @@ fn test_check_signature() { candidate_hash, wrong_session, &statement_5, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1808,7 +1846,8 @@ fn test_check_signature() { candidate_hash, session, &statement_1, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1816,7 +1855,8 @@ fn test_check_signature() { candidate_hash, session, &statement_2, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1824,7 +1864,8 @@ fn test_check_signature() { candidate_hash, session, &statement_3, - &signed_5 + &signed_5, + true, ) .is_err()); assert!(check_signature( @@ -1832,7 +1873,8 @@ fn test_check_signature() { candidate_hash, session, &statement_4, - &signed_5 + &signed_5, + true, ) .is_err()); } diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 200fd57915f9..0da50f6a5373 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -17,7 +17,10 @@ //! Put implementations of functions from staging APIs here. 
use crate::{configuration, initializer, shared}; -use primitives::{vstaging::NodeFeatures, ValidatorIndex}; +use primitives::{ + vstaging::{ApprovalVotingParams, NodeFeatures}, + ValidatorIndex, +}; use sp_std::{collections::btree_map::BTreeMap, prelude::Vec}; /// Implementation for `DisabledValidators` @@ -47,3 +50,9 @@ where pub fn node_features() -> NodeFeatures { >::config().node_features } + +/// Approval voting subsystem configuration parameters +pub fn approval_voting_params() -> ApprovalVotingParams { + let config = >::config(); + config.approval_voting_params +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 1e40f14f4922..43df232e92ab 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,12 +23,13 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, - ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, + OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, @@ -1631,6 +1632,7 @@ pub mod migrations { // Remove `im-online` pallet on-chain storage frame_support::migrations::RemovePallet::DbWeight>, + parachains_configuration::migration::v11::MigrateToV11, ); } @@ -1792,7 +1794,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(9)] + #[api_version(10)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1936,6 +1938,10 @@ sp_api::impl_runtime_apis!
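For context on the `parachains_configuration::migration::v11::MigrateToV11` entry added to the `Unreleased` migrations tuple above: the actual item lives in configuration/migration/v11.rs and may be wrapped differently (for example as a `VersionedMigration` alias, with storage-version checks and try-runtime hooks), but the usual shape behind such an entry is an `OnRuntimeUpgrade` implementation that simply runs the `migrate_to_v11` function shown at the start of this section. A hedged sketch, assuming the surrounding migration module's `Config` bound:

// Illustrative sketch only; the real MigrateToV11 may differ in wrapping and
// bookkeeping (storage-version checks, try-runtime hooks, logging).
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};
use sp_std::marker::PhantomData;

pub struct MigrateToV11<T>(PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV11<T> {
    fn on_runtime_upgrade() -> Weight {
        // Translate the active and pending host configurations from the v10
        // layout to v11 and report the weight consumed.
        migrate_to_v11::<T>()
    }
}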
{ parachains_runtime_api_impl::async_backing_params::() } + fn approval_voting_params() -> ApprovalVotingParams { + parachains_staging_runtime_api_impl::approval_voting_params::() + } + fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 0a70a493b299..9b8eff480f22 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -45,12 +45,14 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, + vstaging::{ApprovalVotingParams, NodeFeatures}, + AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, + SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -1649,6 +1651,7 @@ pub mod migrations { ImOnlinePalletName, ::DbWeight, >, + parachains_configuration::migration::v11::MigrateToV11, ); } @@ -1789,7 +1792,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(9)] + #[api_version(10)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1933,6 +1936,10 @@ sp_api::impl_runtime_apis! { parachains_runtime_api_impl::async_backing_params::() } + fn approval_voting_params() -> ApprovalVotingParams { + parachains_staging_runtime_api_impl::approval_voting_params::() + } + fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl index 135999a092a7..3e1d8ba771c4 100644 --- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl +++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl @@ -32,6 +32,8 @@ alice: parachain 2005 block height is at least 10 within 300 seconds alice: parachain 2006 block height is at least 10 within 300 seconds alice: parachain 2007 block height is at least 10 within 300 seconds +alice: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + # Check preparation time is under 10s. # Check all buckets <= 10. 
alice: reports histogram polkadot_pvf_preparation_time has at least 1 samples in buckets ["0.1", "0.5", "1", "2", "3", "10"] within 10 seconds diff --git a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml index e70322e13e6b..27cd81dface5 100644 --- a/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml +++ b/polkadot/zombienet_tests/functional/0002-parachains-disputes.toml @@ -5,6 +5,10 @@ timeout = 1000 max_validators_per_core = 5 needed_approvals = 8 +[relaychain.genesis.runtime.runtime_genesis_config.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + + [relaychain] default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" chain = "rococo-local" diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml new file mode 100644 index 000000000000..19c7015403d7 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.toml @@ -0,0 +1,115 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + needed_approvals = 4 + relay_vrf_modulo_samples = 6 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params] + max_approval_coalesce_count = 5 + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "alice" + args = [ "-lparachain=trace,runtime=debug" ] + count = 13 + +[[parachains]] +id = 2000 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=1" + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=1", "--parachain-id=2000"] + +[[parachains]] +id = 2001 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=10" + + [parachains.collator] + name = "collator02" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2001", "--pvf-complexity=10"] + +[[parachains]] +id = 2002 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=100" + + [parachains.collator] + name = "collator03" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2002", "--pvf-complexity=100"] + +[[parachains]] +id = 2003 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=300" + + [parachains.collator] + name = "collator04" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--parachain-id=2003", "--pvf-complexity=300"] + +[[parachains]] +id = 2004 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator05" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--parachain-id=2004", "--pvf-complexity=300"] + +[[parachains]] +id = 2005 +addToGenesis = true 
+genesis_state_generator = "undying-collator export-genesis-state --pov-size=20000 --pvf-complexity=400" + + [parachains.collator] + name = "collator06" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=20000", "--pvf-complexity=400", "--parachain-id=2005"] + +[[parachains]] +id = 2006 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator07" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2006"] + +[[parachains]] +id = 2007 +addToGenesis = true +genesis_state_generator = "undying-collator export-genesis-state --pov-size=100000 --pvf-complexity=300" + + [parachains.collator] + name = "collator08" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug", "--pov-size=100000", "--pvf-complexity=300", "--parachain-id=2007"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" \ No newline at end of file diff --git a/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl new file mode 100644 index 000000000000..1fc4f6784460 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0009-approval-voting-coalescing.zndsl @@ -0,0 +1,32 @@ +Description: Approval voting coalescing does not lag finality +Network: ./0009-approval-voting-coalescing.toml +Creds: config + +# Check authority status. +alice: reports node_roles is 4 + +# Ensure parachains are registered. +alice: parachain 2000 is registered within 60 seconds +alice: parachain 2001 is registered within 60 seconds +alice: parachain 2002 is registered within 60 seconds +alice: parachain 2003 is registered within 60 seconds +alice: parachain 2004 is registered within 60 seconds +alice: parachain 2005 is registered within 60 seconds +alice: parachain 2006 is registered within 60 seconds +alice: parachain 2007 is registered within 60 seconds + +# Ensure parachains made progress. +alice: parachain 2000 block height is at least 10 within 300 seconds +alice: parachain 2001 block height is at least 10 within 300 seconds +alice: parachain 2002 block height is at least 10 within 300 seconds +alice: parachain 2003 block height is at least 10 within 300 seconds +alice: parachain 2004 block height is at least 10 within 300 seconds +alice: parachain 2005 block height is at least 10 within 300 seconds +alice: parachain 2006 block height is at least 10 within 300 seconds +alice: parachain 2007 block height is at least 10 within 300 seconds + +alice: reports substrate_block_height{status="finalized"} is at least 30 within 400 seconds + +alice: reports polkadot_parachain_approval_checking_finality_lag < 3 + +alice: reports polkadot_parachain_approvals_no_shows_total < 3 within 10 seconds diff --git a/prdoc/pr_1191.prdoc b/prdoc/pr_1191.prdoc new file mode 100644 index 000000000000..26626731be46 --- /dev/null +++ b/prdoc/pr_1191.prdoc @@ -0,0 +1,21 @@ +title: Approve multiple candidates with a single signature + +doc: + - audience: Node Operator + description: | + Changed approval-voting, approval-distribution to approve multiple candidate with a single message, it adds: + * A new parachains_db version. + * A new validation protocol to support the new message types. 
+ The new logic is initially disabled and will be enabled at a later date, once all validators have upgraded. + +migrations: + db: + - name: Parachains database change from v4 to v5. + description: | + Approval-voting column format has been updated with several new fields. All existing data will automatically + be migrated to the new values. + +crates: + - name: "polkadot" + +host_functions: []
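To connect the prdoc description back to code: a hedged sketch (not the approval-voting subsystem's actual implementation; the helper name and shape are invented for illustration) of how a node could use the runtime-provided limit when deciding how many pending approvals to fold into a single signature.

// Illustrative only. `max_approval_coalesce_count` is the field carried by
// `ApprovalVotingParams`; a value of 1 keeps the legacy
// one-signature-per-candidate behaviour.
fn coalesce_batches<H: Clone>(pending: &[H], max_approval_coalesce_count: u32) -> Vec<Vec<H>> {
    // Guard against a zero value so `chunks` never panics.
    let batch = max_approval_coalesce_count.max(1) as usize;
    pending.chunks(batch).map(|chunk| chunk.to_vec()).collect()
}

With the zombienet setting of max_approval_coalesce_count = 5, eight approvals pending for the same block would be covered by two signatures (5 + 3) instead of eight.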