
Merge branch 'master' into ao-fixed-colander
* master:
  make check-dependent-* only be executed in PRs (#4588)
  session_info: add dispute_period and random_seed (#4547)
  session-info: add new fields + migration (#4545)
  Bump zstd from 0.9.0+zstd.1.5.0 to 0.9.1+zstd.1.5.1 (#4597)
  Relaunch Rococo (#4577)
  Companion for substrate#9732 (#4104)
  Better logs and metrics on PoV fetching. (#4593)
ordian committed Dec 27, 2021
2 parents c306c7b + 138535e commit 0e405c2
Showing 56 changed files with 1,006 additions and 661 deletions.
5 changes: 5 additions & 0 deletions .gitlab-ci.yml
@@ -89,6 +89,10 @@ default:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs
- if: $CI_COMMIT_REF_NAME == "rococo-v1"

.rules-test-pr: &rules-test-pr
rules:
- if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs

#### Vault secrets
.vault-secrets: &vault-secrets
secrets:
@@ -291,6 +295,7 @@ build-malus:
.check-dependent-project: &check-dependent-project
stage: build
<<: *docker-env
<<: *rules-test-pr
<<: *vault-secrets
script:
- git clone
360 changes: 181 additions & 179 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 0 additions & 1 deletion Cargo.toml
@@ -150,6 +150,5 @@ polkadot = { path = "/usr/bin/polkadot" }
[package.metadata.rpm.files]
"../scripts/packaging/polkadot.service" = { path = "/usr/lib/systemd/system/polkadot.service", mode = "644" }


[package.metadata.spellcheck]
config = "./scripts/gitlab/spellcheck.toml"
1 change: 1 addition & 0 deletions bridges/primitives/chain-rococo/src/lib.rs
@@ -48,6 +48,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
impl_version: 0,
apis: sp_version::create_apis_vec![[]],
transaction_version: 0,
state_version: 0,
};

// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo.
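As a side note on the `state_version: 0` addition above: this field selects the state trie layout the runtime uses, where 0 is the legacy V0 layout and 1 is the newer V1 layout that stores large storage values by hash inside the trie. The sketch below is illustrative only; the real conversion lives in `sp_version`/`sp_runtime` and is not part of this diff.

use sp_runtime::StateVersion;

// Illustrative only: how a `state_version` byte is expected to map onto a trie layout.
// The actual helper on `RuntimeVersion` is not shown in this commit.
fn trie_layout_for(state_version: u8) -> Option<StateVersion> {
    match state_version {
        0 => Some(StateVersion::V0), // legacy layout, kept by this commit
        1 => Some(StateVersion::V1), // hashes storage values larger than 32 bytes
        _ => None,                   // unknown versions are rejected
    }
}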
26 changes: 16 additions & 10 deletions bridges/primitives/runtime/src/storage_proof.rs
@@ -19,7 +19,7 @@
use hash_db::{HashDB, Hasher, EMPTY_PREFIX};
use sp_runtime::RuntimeDebug;
use sp_std::vec::Vec;
use sp_trie::{read_trie_value, Layout, MemoryDB, StorageProof};
use sp_trie::{read_trie_value, LayoutV1, MemoryDB, StorageProof};

/// This struct is used to read storage values from a subset of a Merklized database. The "proof"
/// is a subset of the nodes in the Merkle structure of the database, so that it provides
@@ -52,7 +52,8 @@ where
/// Reads a value from the available subset of storage. If the value cannot be read due to an
/// incomplete or otherwise invalid proof, this returns an error.
pub fn read_value(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
read_trie_value::<Layout<H>, _>(&self.db, &self.root, key)
// LayoutV1 and LayoutV0 are identical for proofs that only read values.
read_trie_value::<LayoutV1<H>, _>(&self.db, &self.root, key)
.map_err(|_| Error::StorageValueUnavailable)
}
}
@@ -70,15 +71,20 @@ pub enum Error {
pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) {
use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend};

let state_version = sp_runtime::StateVersion::default();

// construct storage proof
let backend = <InMemoryBackend<sp_core::Blake2Hasher>>::from(vec![
(None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]),
(None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]),
(None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]),
// Value is too big to fit in a branch node
(None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]),
]);
let root = backend.storage_root(std::iter::empty()).0;
let backend = <InMemoryBackend<sp_core::Blake2Hasher>>::from((
vec![
(None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]),
(None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]),
(None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]),
// Value is too big to fit in a branch node
(None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]),
],
state_version,
));
let root = backend.storage_root(std::iter::empty(), state_version).0;
let proof = StorageProof::new(
prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]])
.unwrap()
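To make the shape of this API concrete, here is a minimal test sketch for this module. The `StorageProofChecker::new(root, proof)` constructor and the behaviour for the absent `key22` are assumptions based on the surrounding code, not something this hunk shows.

// Hypothetical test; assumes `StorageProofChecker::<H>::new(root, proof) -> Result<Self, Error>`.
#[test]
fn read_value_works_on_crafted_proof() {
    let (root, proof) = craft_valid_storage_proof();
    let checker = StorageProofChecker::<sp_core::Blake2Hasher>::new(root, proof)
        .expect("crafted proof should be valid");

    // `key1` is covered by the proof, so its value is readable.
    assert_eq!(checker.read_value(b"key1").unwrap(), Some(b"value1".to_vec()));

    // `key22` was listed in `prove_read` but has no value, so it should read as `None`
    // rather than `Error::StorageValueUnavailable`.
    assert_eq!(checker.read_value(b"key22").unwrap(), None);
}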
2 changes: 1 addition & 1 deletion erasure-coding/src/lib.rs
@@ -32,7 +32,7 @@ use polkadot_primitives::v0::{self, BlakeTwo256, Hash as H256, HashT};
use sp_core::Blake2Hasher;
use thiserror::Error;
use trie::{
trie_types::{TrieDB, TrieDBMut},
trie_types::{TrieDB, TrieDBMutV0 as TrieDBMut},
MemoryDB, Trie, TrieMut, EMPTY_PREFIX,
};

5 changes: 3 additions & 2 deletions node/client/src/lib.rs
@@ -19,8 +19,9 @@
//! Provides the [`AbstractClient`] trait that is a super trait that combines all the traits the client implements.
//! There is also the [`Client`] enum that combines all the different clients into one common structure.

use polkadot_primitives::v1::{
AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce, ParachainHost,
use polkadot_primitives::{
v1::{AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce},
v2::ParachainHost,
};
use sc_client_api::{AuxStore, Backend as BackendT, BlockchainEvents, KeyIterator, UsageProvider};
use sc_executor::NativeElseWasmExecutor;
5 changes: 3 additions & 2 deletions node/core/approval-voting/src/criteria.rs
@@ -20,8 +20,9 @@ use parity_scale_codec::{Decode, Encode};
use polkadot_node_primitives::approval::{
self as approval_types, AssignmentCert, AssignmentCertKind, DelayTranche, RelayVRFStory,
};
use polkadot_primitives::v1::{
AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, SessionInfo, ValidatorIndex,
use polkadot_primitives::{
v1::{AssignmentId, AssignmentPair, CandidateHash, CoreIndex, GroupIndex, ValidatorIndex},
v2::SessionInfo,
};
use sc_keystore::LocalKeystore;
use sp_application_crypto::ByteArray;
8 changes: 7 additions & 1 deletion node/core/approval-voting/src/import.rs
@@ -582,7 +582,7 @@ pub(crate) mod tests {
use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
use polkadot_node_subsystem::messages::AllMessages;
use polkadot_node_subsystem_test_helpers::make_subsystem_context;
use polkadot_primitives::v1::{SessionInfo, ValidatorIndex};
use polkadot_primitives::{v1::ValidatorIndex, v2::SessionInfo};
pub(crate) use sp_consensus_babe::{
digests::{CompatibleDigestItem, PreDigest, SecondaryVRFPreDigest},
AllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch,
@@ -685,6 +685,9 @@ pub(crate) mod tests {
n_delay_tranches: index as _,
no_show_slots: index as _,
needed_approvals: index as _,
active_validator_indices: Vec::new(),
dispute_period: 6,
random_seed: [0u8; 32],
}
}

@@ -1140,6 +1143,9 @@ pub(crate) mod tests {
relay_vrf_modulo_samples: irrelevant,
n_delay_tranches: irrelevant,
no_show_slots: irrelevant,
active_validator_indices: Vec::new(),
dispute_period: 6,
random_seed: [0u8; 32],
};

let slot = Slot::from(10);
11 changes: 7 additions & 4 deletions node/core/approval-voting/src/lib.rs
@@ -50,10 +50,13 @@ use polkadot_node_subsystem_util::{
},
TimeoutExt,
};
use polkadot_primitives::v1::{
ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement,
GroupIndex, Hash, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId,
ValidatorIndex, ValidatorPair, ValidatorSignature,
use polkadot_primitives::{
v1::{
ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt,
DisputeStatement, GroupIndex, Hash, SessionIndex, ValidDisputeStatementKind, ValidatorId,
ValidatorIndex, ValidatorPair, ValidatorSignature,
},
v2::SessionInfo,
};
use sc_keystore::LocalKeystore;
use sp_application_crypto::Pair;
98 changes: 26 additions & 72 deletions node/core/approval-voting/src/tests.rs
@@ -740,6 +740,24 @@ impl ChainBuilder {
}
}

fn session_info(keys: &[Sr25519Keyring]) -> SessionInfo {
SessionInfo {
validators: keys.iter().map(|v| v.public().into()).collect(),
discovery_keys: keys.iter().map(|v| v.public().into()).collect(),
assignment_keys: keys.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
n_cores: keys.len() as _,
needed_approvals: 2,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
active_validator_indices: vec![],
dispute_period: 6,
random_seed: [0u8; 32],
}
}

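The tests below rely on this helper together with Rust's struct update syntax, so each test only spells out the fields it actually cares about. A small sketch of the pattern (the overridden values here are illustrative):

let validators = vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob];
// Take every field from the helper above, overriding just the ones under test.
let custom_session_info = SessionInfo {
    needed_approvals: 1,
    no_show_slots: 3,
    ..session_info(&validators)
};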
async fn import_block(
overseer: &mut VirtualOverseer,
hashes: &[(Hash, Header)],
@@ -757,18 +775,7 @@ async fn import_block(

let session_info = config.session_info.clone().unwrap_or({
let validators = vec![Sr25519Keyring::Alice, Sr25519Keyring::Bob];
SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![vec![ValidatorIndex(0)], vec![ValidatorIndex(1)]],
n_cores: validators.len() as _,
needed_approvals: 1,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
}
SessionInfo { needed_approvals: 1, ..session_info(&validators) }
});

overseer_send(
@@ -1452,20 +1459,13 @@ fn subsystem_second_approval_import_only_schedules_wakeups() {
Sr25519Keyring::Eve,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 1,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

// Add block hash 0x01...
@@ -1763,20 +1763,12 @@ fn import_checked_approval_updates_entries_and_schedules() {
Sr25519Keyring::Eve,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

let candidate_descriptor = make_candidate(1.into(), &block_hash);
@@ -1923,20 +1915,12 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() {
Sr25519Keyring::Eve,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

ChainBuilder::new()
@@ -2209,20 +2193,12 @@ fn subsystem_validate_approvals_cache() {
Sr25519Keyring::Eve,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

let candidates = Some(vec![(candidate_receipt.clone(), CoreIndex(0), GroupIndex(0))]);
@@ -2422,20 +2398,14 @@ where
Sr25519Keyring::Ferdie,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2), ValidatorIndex(3)],
vec![ValidatorIndex(4), ValidatorIndex(5)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 2,
n_delay_tranches: 50,
no_show_slots,
..session_info(&validators)
};

ChainBuilder::new()
@@ -2740,20 +2710,12 @@ fn pre_covers_dont_stall_approval() {
Sr25519Keyring::One,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2), ValidatorIndex(5)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

let candidate_descriptor = make_candidate(1.into(), &block_hash);
@@ -2920,20 +2882,12 @@ fn waits_until_approving_assignments_are_old_enough() {
Sr25519Keyring::One,
];
let session_info = SessionInfo {
validators: validators.iter().map(|v| v.public().into()).collect(),
validator_groups: vec![
vec![ValidatorIndex(0), ValidatorIndex(1)],
vec![ValidatorIndex(2), ValidatorIndex(5)],
vec![ValidatorIndex(3), ValidatorIndex(4)],
],
needed_approvals: 2,
discovery_keys: validators.iter().map(|v| v.public().into()).collect(),
assignment_keys: validators.iter().map(|v| v.public().into()).collect(),
n_cores: validators.len() as _,
zeroth_delay_tranche_width: 5,
relay_vrf_modulo_samples: 3,
n_delay_tranches: 50,
no_show_slots: 2,
..session_info(&validators)
};

let candidate_descriptor = make_candidate(1.into(), &block_hash);
11 changes: 7 additions & 4 deletions node/core/dispute-coordinator/src/real/initialized.rs
@@ -39,10 +39,13 @@ use polkadot_node_subsystem::{
use polkadot_node_subsystem_util::rolling_session_window::{
RollingSessionWindow, SessionWindowUpdate,
};
use polkadot_primitives::v1::{
byzantine_threshold, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement,
DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo,
ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
use polkadot_primitives::{
v1::{
byzantine_threshold, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement,
DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex,
ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
},
v2::SessionInfo,
};

use crate::{metrics::Metrics, real::DisputeCoordinatorSubsystem, LOG_TARGET};
