Prevent dispute-coordinator from doing any work before the initial node sync is complete.

On the first `ActiveLeavesUpdate` the subsystem queries the runtime to
obtain a `RollingSessionWindow`. Because the first leaf update generated
by the overseer is either the genesis block (when the local database is
empty) or the last block seen before the node was stopped, these runtime
API queries often fail with `NotSupported` errors.

The mitigation is to pass a `SyncOracle` instance when constructing the
dispute coordinator and to do no work until the full sync is complete.
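
A minimal sketch of that gating, using a hypothetical free-standing helper and a generic leaf type in place of the real subsystem context (the actual change lives in `wait_for_first_leaf`, see the diff below):

```rust
use futures::{Stream, StreamExt};
use sp_consensus::SyncOracle;

/// Simplified illustration of the mitigation: leaf updates that arrive while
/// the node still reports a major sync are ignored, so no runtime API calls
/// are issued for blocks the node cannot answer queries about yet.
async fn first_synced_leaf<L>(
    sync_oracle: &(dyn SyncOracle + Send + Sync),
    leaves: &mut (impl Stream<Item = L> + Unpin),
) -> Option<L> {
    while let Some(leaf) = leaves.next().await {
        if sync_oracle.is_major_syncing() {
            // Still syncing - skip this leaf and wait for the next one.
            continue
        }
        return Some(leaf)
    }
    None
}
```

In the subsystem itself the same check simply `continue`s the signal-receive loop on `ActiveLeaves` updates while `is_major_syncing()` returns `true`.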
tdimitrov committed Feb 7, 2023
1 parent 386bfb4 commit a601ff8
Showing 5 changed files with 27 additions and 7 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion node/core/dispute-coordinator/Cargo.toml
@@ -19,7 +19,7 @@ polkadot-node-subsystem = { path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }

sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }

sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }

[dev-dependencies]
kvdb-memorydb = "0.13.0"
5 changes: 3 additions & 2 deletions node/core/dispute-coordinator/src/initialized.rs
@@ -93,7 +93,8 @@ impl Initialized {
spam_slots: SpamSlots,
scraper: ChainScraper,
) -> Self {
let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics } = subsystem;
let DisputeCoordinatorSubsystem { config: _, store: _, keystore, metrics, sync_oracle: _ } =
subsystem;

let (participation_sender, participation_receiver) = mpsc::channel(1);
let participation = Participation::new(participation_sender);
@@ -1235,7 +1236,7 @@ enum MuxedMessage {
impl MuxedMessage {
async fn receive<Context>(
ctx: &mut Context,
from_sender: &mut participation::WorkerMessageReceiver,
from_sender: &mut WorkerMessageReceiver,
) -> FatalResult<Self> {
// We are only fusing here to make `select` happy, in reality we will quit if the stream
// ends.
25 changes: 21 additions & 4 deletions node/core/dispute-coordinator/src/lib.rs
@@ -30,6 +30,7 @@ use futures::FutureExt;

use gum::CandidateHash;
use sc_keystore::LocalKeystore;
use sp_consensus::SyncOracle;

use polkadot_node_primitives::{
CandidateVotes, DisputeMessage, DisputeMessageCheckError, SignedDisputeStatement,
@@ -117,6 +118,7 @@ pub struct DisputeCoordinatorSubsystem {
store: Arc<dyn Database>,
keystore: Arc<LocalKeystore>,
metrics: Metrics,
sync_oracle: Box<dyn SyncOracle + Send + Sync>,
}

/// Configuration for the dispute coordinator subsystem.
@@ -164,8 +166,9 @@ impl DisputeCoordinatorSubsystem {
config: Config,
keystore: Arc<LocalKeystore>,
metrics: Metrics,
sync_oracle: Box<dyn SyncOracle + Send + Sync>,
) -> Self {
Self { store, config, keystore, metrics }
Self { store, config, keystore, metrics, sync_oracle }
}

/// Initialize and afterwards run `Initialized::run`.
@@ -213,8 +216,12 @@ impl DisputeCoordinatorSubsystem {
let db_params =
DatabaseParams { db: self.store.clone(), db_column: self.config.col_session_data };

// The usage of `SyncOracle` below is not 100% correct. A better approach will be to
// cache the result and once the oracle returns `sync complete` to consider the node
// synced and never query the oracle again. In this case however this is not necessary
// because the oracle is used only once during initialisation.
let (first_leaf, rolling_session_window) =
match get_rolling_session_window(ctx, db_params).await {
match get_rolling_session_window(ctx, db_params, &self.sync_oracle).await {
Ok(Some(update)) => update,
Ok(None) => {
gum::info!(target: LOG_TARGET, "received `Conclude` signal, exiting");
@@ -377,8 +384,9 @@ impl DisputeCoordinatorSubsystem {
async fn get_rolling_session_window<Context>(
ctx: &mut Context,
db_params: DatabaseParams,
sync_oracle: &Box<dyn SyncOracle + Send + Sync>,
) -> Result<Option<(ActivatedLeaf, RollingSessionWindow)>> {
if let Some(leaf) = { wait_for_first_leaf(ctx) }.await? {
if let Some(leaf) = { wait_for_first_leaf(ctx, sync_oracle) }.await? {
let sender = ctx.sender().clone();
Ok(Some((
leaf.clone(),
@@ -393,11 +401,20 @@ async fn get_rolling_session_window<Context>(

/// Wait for `ActiveLeavesUpdate`, returns `None` if `Conclude` signal came first.
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>> {
async fn wait_for_first_leaf<Context>(
ctx: &mut Context,
sync_oracle: &Box<dyn SyncOracle + Send + Sync>,
) -> Result<Option<ActivatedLeaf>> {
loop {
match ctx.recv().await? {
FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(None),
FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => {
if sync_oracle.is_major_syncing() {
// still syncing - ignore this event because otherwise the runtime api calls will
// fail due to executing them on pruned blocks.
continue
}

if let Some(activated) = update.activated {
return Ok(Some(activated))
}
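The comment added just before the `get_rolling_session_window` call notes that caching the oracle's answer would be the cleaner pattern if it were consulted more than once. A rough sketch of such a latch, where the `CachedSyncOracle` name and shape are purely illustrative and not part of this commit:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use sp_consensus::SyncOracle;

/// Hypothetical latch around a `SyncOracle`: once the inner oracle reports
/// that the major sync has finished, remember that and never query it again.
struct CachedSyncOracle {
    inner: Box<dyn SyncOracle + Send + Sync>,
    synced: AtomicBool,
}

impl CachedSyncOracle {
    fn new(inner: Box<dyn SyncOracle + Send + Sync>) -> Self {
        Self { inner, synced: AtomicBool::new(false) }
    }

    /// `true` while the initial major sync is still in progress.
    fn is_major_syncing(&self) -> bool {
        if self.synced.load(Ordering::Relaxed) {
            return false
        }
        let still_syncing = self.inner.is_major_syncing();
        if !still_syncing {
            // Latch the result: once the node has been seen as synced, it stays synced.
            self.synced.store(true, Ordering::Relaxed);
        }
        still_syncing
    }
}
```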
1 change: 1 addition & 0 deletions node/service/src/overseer.rs
@@ -300,6 +300,7 @@ where
dispute_coordinator_config,
keystore.clone(),
Metrics::register(registry)?,
Box::new(network_service.clone()),
))
.dispute_distribution(DisputeDistributionSubsystem::new(
keystore.clone(),
