This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Companion PR to splitting Roles #960

Merged (8 commits, Apr 3, 2020)

Changes from 1 commit.
cli/src/command.rs (4 changes: 2 additions & 2 deletions)

@@ -151,8 +151,8 @@ where
 		TLightClient<R, D>
 	>,
 {
-	match config.roles {
-		service::Roles::LIGHT =>
+	match config.role {
+		service::Role::Light =>
 			sc_cli::run_service_until_exit(
 				config,
 				|config| service::new_light::<R, D, E>(config),
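
Editor's note: for orientation, the following is a sketch of the node-local `Role` type that `config.role` now carries, reconstructed only from its usage in this PR (`service::Role::Light` above, plus `role.is_authority()` and the `Role::Authority { sentry_nodes }` pattern in service/src/lib.rs below). The authoritative definition lives in Substrate; the `Full` variant and the `sentry_nodes` payload type are assumptions.

    // Sketch only: reconstructed from usage in this PR, not the
    // authoritative sc-service definition.
    pub enum Role {
        /// Regular full node (assumed variant).
        Full,
        /// Light client; collators refuse to run as this role
        /// (see collator/src/lib.rs below).
        Light,
        /// Actual authority; carries the addresses of its sentry nodes.
        /// The payload type is an assumption.
        Authority { sentry_nodes: Vec<String> },
    }

    impl Role {
        /// Mirrors the `role.is_authority()` call in service/src/lib.rs.
        pub fn is_authority(&self) -> bool {
            matches!(self, Role::Authority { .. })
        }
    }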
collator/src/lib.rs (10 changes: 5 additions & 5 deletions)

@@ -64,7 +64,7 @@ use polkadot_primitives::{
 };
 use polkadot_cli::{
 	ProvideRuntimeApi, AbstractService, ParachainHost, IsKusama,
-	service::{self, Roles}
+	service::{self, Role}
 };
 pub use polkadot_cli::{VersionInfo, load_spec, service::Configuration};
 pub use polkadot_validation::SignedStatement;

@@ -344,8 +344,8 @@ where
 	<P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
 	let is_kusama = config.expect_chain_spec().is_kusama();
-	match (is_kusama, config.roles) {
-		(_, Roles::LIGHT) => return Err(
+	match (is_kusama, config.role) {
+		(_, Role::Light) => return Err(
 			polkadot_service::Error::Other("light nodes are unsupported as collator".into())
 		).into(),
 		(true, _) =>

@@ -389,8 +389,8 @@ pub fn run_collator<P>(
 	P::ParachainContext: Send + 'static,
 	<P::ParachainContext as ParachainContext>::ProduceCandidate: Send,
 {
-	match (config.expect_chain_spec().is_kusama(), config.roles) {
-		(_, Roles::LIGHT) => return Err(
+	match (config.expect_chain_spec().is_kusama(), config.role) {
+		(_, Role::Light) => return Err(
 			polkadot_cli::Error::Input("light nodes are unsupported as collator".into())
 		).into(),
 		(true, _) =>
network/src/legacy/gossip/mod.rs (14 changes: 7 additions & 7 deletions)

@@ -51,7 +51,7 @@
 use sp_runtime::traits::{BlakeTwo256, Hash as HashT};
 use sp_blockchain::Error as ClientError;
-use sc_network::{config::Roles, PeerId, ReputationChange};
+use sc_network::{ObservedRole, PeerId, ReputationChange};
 use sc_network::NetworkService;
 use sc_network_gossip::{
 	ValidationResult as GossipValidationResult,

@@ -635,7 +635,7 @@ impl<C: ChainContext + ?Sized> MessageValidator<C> {
 }
 
 impl<C: ChainContext + ?Sized> sc_network_gossip::Validator<Block> for MessageValidator<C> {
-	fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: Roles) {
+	fn new_peer(&self, _context: &mut dyn ValidatorContext<Block>, who: &PeerId, _roles: ObservedRole) {
 		let mut inner = self.inner.write();
 		inner.peers.insert(who.clone(), PeerData::default());
 	}
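
Editor's note: unlike the old `config::Roles` bitflags, which described both the local node and remote peers, `ObservedRole` only describes what a remote peer appears to be. A sketch of the enum as its variants are exercised across this PR (`Full` and `Authority` in the tests below, `OurSentry` and `OurGuardedAuthority` in network/src/protocol/mod.rs); the `Light` variant is an assumption, and the authoritative definition lives in sc-network.

    // Sketch only: variants as used in this PR; see sc-network for the
    // real definition.
    pub enum ObservedRole {
        /// Peer announced itself as a full node.
        Full,
        /// Peer announced itself as a light client (assumed variant).
        Light,
        /// Peer is one of our own sentry nodes.
        OurSentry,
        /// Peer is the authority that this sentry node guards.
        OurGuardedAuthority,
        /// Peer announced itself as an authority.
        Authority,
    }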
@@ -833,7 +833,7 @@ mod tests {
 	let peer_a = PeerId::random();
 
 	let mut validator_context = MockValidatorContext::default();
-	validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+	validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 	assert!(validator_context.events.is_empty());
 	validator_context.clear();

@@ -911,7 +911,7 @@
 	let peer_a = PeerId::random();
 
 	let mut validator_context = MockValidatorContext::default();
-	validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+	validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 	assert!(validator_context.events.is_empty());
 	validator_context.clear();

@@ -953,7 +953,7 @@
 	let peer_a = PeerId::random();
 
 	let mut validator_context = MockValidatorContext::default();
-	validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+	validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 	assert!(validator_context.events.is_empty());
 	validator_context.clear();

@@ -1007,7 +1007,7 @@
 	let peer_a = PeerId::random();
 
 	let mut validator_context = MockValidatorContext::default();
-	validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+	validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 	assert!(validator_context.events.is_empty());
 	validator_context.clear();

@@ -1099,7 +1099,7 @@
 	let peer_a = PeerId::random();
 
 	let mut validator_context = MockValidatorContext::default();
-	validator.new_peer(&mut validator_context, &peer_a, Roles::FULL);
+	validator.new_peer(&mut validator_context, &peer_a, ObservedRole::Full);
 	assert!(validator_context.events.is_empty());
 	validator_context.clear();
network/src/protocol/mod.rs (12 changes: 6 additions & 6 deletions)

@@ -41,7 +41,7 @@ use polkadot_validation::{
 	SharedTable, TableRouter, Network as ParachainNetwork, Validated, GenericStatement, Collators,
 	SignedStatement,
 };
-use sc_network::{config::Roles, Event, PeerId};
+use sc_network::{ObservedRole, Event, PeerId};
 use sp_api::ProvideRuntimeApi;
 use sp_runtime::ConsensusEngineId;

@@ -72,7 +72,7 @@ mod tests;
 // Messages from the service API or network adapter.
 enum ServiceToWorkerMsg {
 	// basic peer messages.
-	PeerConnected(PeerId, Roles),
+	PeerConnected(PeerId, ObservedRole),
 	PeerMessage(PeerId, Vec<bytes::Bytes>),
 	PeerDisconnected(PeerId),

@@ -247,11 +247,11 @@ pub fn start<C, Api, SP>(
 			Event::NotificationStreamOpened {
 				remote,
 				engine_id,
-				roles,
+				role,
 			} => {
 				if engine_id != POLKADOT_ENGINE_ID { continue }
 
-				worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, roles)).await
+				worker_sender.send(ServiceToWorkerMsg::PeerConnected(remote, role)).await
 			},
 			Event::NotificationStreamClosed {
 				remote,

@@ -483,8 +483,8 @@ impl ProtocolHandler {
 		}
 	}
 
-	fn on_connect(&mut self, peer: PeerId, roles: Roles) {
-		let claimed_validator = roles.contains(Roles::AUTHORITY);
+	fn on_connect(&mut self, peer: PeerId, role: ObservedRole) {
+		let claimed_validator = matches!(role, ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority);
Review comment from @tomaka (Contributor, Author), Apr 1, 2020:

    This should be reviewed. I kept the exact behaviour as it was, but maybe that's not desired, I don't know.

Reply from a reviewer (Contributor):

    For the moment, this behavior is OK. There are definitely some things that we could do better now that we are aware of our own sentry nodes and guarded validators.
 		self.peers.insert(peer.clone(), PeerData {
 			claimed_validator,
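
Editor's note: the thread above can be made concrete. Under the old bitflags API, our own sentries and the authority they guard announced the AUTHORITY bit just like third-party authorities (see the "sentry nodes announce themselves as authorities to the network" comment in service/src/lib.rs below), so `roles.contains(Roles::AUTHORITY)` covered exactly the three cases the new `matches!` spells out. A minimal sketch of the predicate being preserved (the helper name is hypothetical):

    // Hypothetical helper: any peer that would previously have carried the
    // AUTHORITY bit is still counted as a claimed validator.
    fn claims_to_be_validator(role: &ObservedRole) -> bool {
        matches!(
            role,
            ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority | ObservedRole::Authority
        )
    }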
network/src/protocol/tests.rs (6 changes: 3 additions & 3 deletions)

@@ -189,7 +189,7 @@ sp_api::mock_impl_runtime_apis! {
 }
 
 impl super::Service {
-	async fn connect_peer(&mut self, peer: PeerId, roles: Roles) {
+	async fn connect_peer(&mut self, peer: PeerId, roles: ObservedRole) {
 		self.sender.send(ServiceToWorkerMsg::PeerConnected(peer, roles)).await.unwrap();
 	}

@@ -374,7 +374,7 @@ fn validator_peer_cleaned_up() {
 
 	pool.spawner().spawn_local(worker_task).unwrap();
 	pool.run_until(async move {
-		service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+		service.connect_peer(peer.clone(), ObservedRole::Authority).await;
 		service.peer_message(peer.clone(), Message::Status(Status {
 			version: VERSION,
 			collating_for: None,

@@ -434,7 +434,7 @@ fn validator_key_spillover_cleaned() {
 
 	pool.spawner().spawn_local(worker_task).unwrap();
 	pool.run_until(async move {
-		service.connect_peer(peer.clone(), Roles::AUTHORITY).await;
+		service.connect_peer(peer.clone(), ObservedRole::Authority).await;
 		service.peer_message(peer.clone(), Message::Status(Status {
 			version: VERSION,
 			collating_for: None,
network/test/src/lib.rs (15 changes: 5 additions & 10 deletions)

@@ -36,7 +36,6 @@ use sc_client_api::{
 };
 use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
 use sc_client::LongestChain;
-use sc_network::config::Roles;
 use sp_consensus::block_validation::DefaultBlockAnnounceValidator;
 use sp_consensus::import_queue::{
 	BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport,

@@ -526,22 +525,21 @@ pub trait TestNetFactory: Sized {
 	/// Create new test network with this many peers.
 	fn new(n: usize) -> Self {
 		trace!(target: "test_network", "Creating test network");
 		let config = Self::default_config();
 		let mut net = Self::from_config(&config);
 
 		for i in 0..n {
 			trace!(target: "test_network", "Adding peer {}", i);
-			net.add_full_peer(&config);
+			net.add_full_peer();
 		}
 		net
 	}
 
-	fn add_full_peer(&mut self, config: &ProtocolConfig) {
-		self.add_full_peer_with_states(config, None)
+	fn add_full_peer(&mut self) {
+		self.add_full_peer_with_states(None)
 	}
 
 	/// Add a full peer.
-	fn add_full_peer_with_states(&mut self, config: &ProtocolConfig, keep_blocks: Option<u32>) {
+	fn add_full_peer_with_states(&mut self, keep_blocks: Option<u32>) {
 		let test_client_builder = match keep_blocks {
 			Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks),
 			None => TestClientBuilder::with_default_backend(),

@@ -618,10 +616,7 @@
 	}
 
 	/// Add a light peer.
-	fn add_light_peer(&mut self, config: &ProtocolConfig) {
-		let mut config = config.clone();
-		config.roles = Roles::LIGHT;
-
+	fn add_light_peer(&mut self) {
 		let (c, backend) = polkadot_test_runtime_client::new_light();
 		let client = Arc::new(c);
 		let (
service/src/lib.rs (10 changes: 5 additions & 5 deletions)

@@ -31,7 +31,7 @@ use inherents::InherentDataProviders;
 use sc_executor::native_executor_instance;
 use log::info;
 pub use service::{
-	AbstractService, Roles, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
+	AbstractService, Role, PruningMode, TransactionPoolOptions, Error, RuntimeGenesis, ServiceBuilderCommand,
 	TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor,
 	Configuration, ChainSpec,
 };

@@ -316,7 +316,8 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
 	use futures::stream::StreamExt;
 
 	let is_collator = collating_for.is_some();
-	let is_authority = config.roles.is_authority() && !is_collator;
+	let role = config.role.clone();
+	let is_authority = role.is_authority() && !is_collator;
 	let force_authoring = config.force_authoring;
 	let max_block_data_size = max_block_data_size;
 	let db_path = if let DatabaseConfig::Path { ref path, .. } = config.expect_database() {

@@ -327,7 +328,6 @@
 	let disable_grandpa = config.disable_grandpa;
 	let name = config.name.clone();
 	let authority_discovery_enabled = authority_discovery_enabled;
-	let sentry_nodes = config.network.sentry_nodes.clone();
 	let slot_duration = slot_duration;
 
 	// sentry nodes announce themselves as authorities to the network

@@ -390,7 +390,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
 			service.spawn_task_handle(),
 		).map_err(|e| format!("Could not spawn network worker: {:?}", e))?;
 
-	if participates_in_consensus {
+	if let (Role::Authority { sentry_nodes }, true) = (&role, participates_in_consensus) {
 		let availability_store = {
 			use std::path::PathBuf;

@@ -472,7 +472,7 @@ pub fn new_full<Runtime, Dispatch, Extrinsic>(
 		let authority_discovery = authority_discovery::AuthorityDiscovery::new(
 			service.client(),
 			network,
-			sentry_nodes,
+			sentry_nodes.clone(),
 			service.keystore(),
 			dht_event_stream,
 			service.prometheus_registry(),
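
Editor's note: the net effect in this file is that the sentry-node list no longer lives in `config.network.sentry_nodes` but inside the `Role::Authority` variant itself, so it is only reachable on a node that actually runs as an authority. A minimal sketch of the new access pattern, mirroring the `if let` above (the address type is a stand-in, not the real config type):

    // Sketch only: `SentryAddr` stands in for whatever address type the
    // configuration actually uses.
    type SentryAddr = String;

    fn sentry_nodes(role: &Role) -> &[SentryAddr] {
        match role {
            // Only an authority carries sentry addresses after this PR.
            Role::Authority { sentry_nodes } => sentry_nodes,
            _ => &[],
        }
    }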