BABE's revert procedure (paritytech#11022)
* First rough draft for BABE revert

* Proper babe revert test

* Cleanup

* Test trivial cleanup

* Fix to make clippy happy

* Check polkadot companion

* Check cumulus companion

* Remove babe's blocks weight on revert

* Handle "empty" blockchain edge case

* Run companions

* Simplify the filter predicate

* Saturating sub is not required

* Run pipeline

* Run pipeline again...
davxy authored and grishasobol committed Mar 28, 2022
1 parent 1364243 commit c7a7359
Showing 7 changed files with 314 additions and 22 deletions.
2 changes: 1 addition & 1 deletion bin/node-template/node/src/command.rs
@@ -95,7 +95,7 @@ pub fn run() -> sc_cli::Result<()> {
runner.async_run(|config| {
let PartialComponents { client, task_manager, backend, .. } =
service::new_partial(&config)?;
Ok((cmd.run(client, backend), task_manager))
Ok((cmd.run(client, backend, None), task_manager))
})
},
Some(Subcommand::Benchmark(cmd)) =>
8 changes: 7 additions & 1 deletion bin/node/cli/src/command.rs
@@ -168,7 +168,13 @@ pub fn run() -> Result<()> {
let runner = cli.create_runner(cmd)?;
runner.async_run(|config| {
let PartialComponents { client, task_manager, backend, .. } = new_partial(&config)?;
Ok((cmd.run(client, backend), task_manager))
let revert_aux = Box::new(|client, backend, blocks| {
sc_consensus_babe::revert(client, backend, blocks)?;
// TODO: grandpa revert
Ok(())
});

Ok((cmd.run(client, backend, Some(revert_aux)), task_manager))
})
},
#[cfg(feature = "try-runtime")]
16 changes: 14 additions & 2 deletions client/cli/src/commands/revert_cmd.rs
@@ -24,7 +24,7 @@ use crate::{
use clap::Parser;
use sc_client_api::{Backend, UsageProvider};
use sc_service::chain_ops::revert_chain;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use std::{fmt::Debug, str::FromStr, sync::Arc};

/// The `revert` command used to revert the chain to a previous state.
@@ -43,16 +43,28 @@ pub struct RevertCmd {
pub pruning_params: PruningParams,
}

/// Revert handler for auxiliary data (e.g. consensus).
type AuxRevertHandler<C, BA, B> =
Box<dyn FnOnce(Arc<C>, Arc<BA>, NumberFor<B>) -> error::Result<()>>;

impl RevertCmd {
/// Run the revert command
pub async fn run<B, BA, C>(&self, client: Arc<C>, backend: Arc<BA>) -> error::Result<()>
pub async fn run<B, BA, C>(
&self,
client: Arc<C>,
backend: Arc<BA>,
aux_revert: Option<AuxRevertHandler<C, BA, B>>,
) -> error::Result<()>
where
B: BlockT,
BA: Backend<B>,
C: UsageProvider<B>,
<<<B as BlockT>::Header as HeaderT>::Number as FromStr>::Err: Debug,
{
let blocks = self.num.parse()?;
if let Some(aux_revert) = aux_revert {
aux_revert(client.clone(), backend.clone(), blocks)?;
}
revert_chain(client, backend, blocks)?;

Ok(())
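The new third argument is a boxed `FnOnce` hook that `RevertCmd::run` invokes before `revert_chain`: nodes with no consensus aux data to clean up pass `None` (as bin/node-template does above), while BABE-based nodes pass a closure like the one in bin/node/cli. A minimal sketch of such a handler written as a free function, using the trait bounds that `sc_consensus_babe::revert` requires; the function itself is illustrative and not part of this commit:

use std::sync::Arc;

use sc_client_api::{backend::AuxStore, Backend as BackendT, UsageProvider};
use sp_api::ProvideRuntimeApi;
use sp_blockchain::{HeaderBackend, HeaderMetadata};
use sp_consensus_babe::BabeApi;
use sp_runtime::traits::{Block as BlockT, NumberFor};

/// Prune BABE auxiliary data before `RevertCmd` reverts the chain itself.
/// A node could pass this as `Some(Box::new(babe_aux_revert))`, or inline it
/// as a closure exactly as bin/node/cli does above.
fn babe_aux_revert<B, BA, C>(
	client: Arc<C>,
	backend: Arc<BA>,
	blocks: NumberFor<B>,
) -> sc_cli::Result<()>
where
	B: BlockT,
	BA: BackendT<B>,
	C: AuxStore
		+ HeaderMetadata<B, Error = sp_blockchain::Error>
		+ HeaderBackend<B>
		+ ProvideRuntimeApi<B>
		+ UsageProvider<B>,
	C::Api: BabeApi<B>,
{
	// Removes epoch-changes nodes and block-weight entries above the revert
	// point; `RevertCmd::run` then calls `revert_chain` on the same arguments.
	sc_consensus_babe::revert(client, backend, blocks)?;
	Ok(())
}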
79 changes: 76 additions & 3 deletions client/consensus/babe/src/lib.rs
@@ -92,8 +92,8 @@ use retain_mut::RetainMut;
use schnorrkel::SignatureError;

use sc_client_api::{
backend::AuxStore, AuxDataOperations, BlockchainEvents, FinalityNotification, PreCommitActions,
ProvideUncles, UsageProvider,
backend::AuxStore, AuxDataOperations, Backend as BackendT, BlockchainEvents,
FinalityNotification, PreCommitActions, ProvideUncles, UsageProvider,
};
use sc_consensus::{
block_import::{
@@ -113,7 +113,9 @@ use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_application_crypto::AppKey;
use sp_block_builder::BlockBuilder as BlockBuilderApi;
use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult};
use sp_blockchain::{
Backend as _, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult,
};
use sp_consensus::{
BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer,
SelectChain,
@@ -1830,3 +1832,74 @@ where

Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry))
}

/// Reverts BABE's auxiliary data (epoch-changes tree and block weights) ahead of a chain revert of up to `blocks` blocks.
pub fn revert<Block, Client, Backend>(
client: Arc<Client>,
backend: Arc<Backend>,
blocks: NumberFor<Block>,
) -> ClientResult<()>
where
Block: BlockT,
Client: AuxStore
+ HeaderMetadata<Block, Error = sp_blockchain::Error>
+ HeaderBackend<Block>
+ ProvideRuntimeApi<Block>
+ UsageProvider<Block>,
Client::Api: BabeApi<Block>,
Backend: BackendT<Block>,
{
let best_number = client.info().best_number;
let finalized = client.info().finalized_number;
let revertible = blocks.min(best_number - finalized);

let number = best_number - revertible;
let hash = client
.block_hash_from_id(&BlockId::Number(number))?
.ok_or(ClientError::Backend(format!(
"Unexpected hash lookup failure for block number: {}",
number
)))?;

// Revert epoch changes tree.

let config = Config::get(&*client)?;
let epoch_changes =
aux_schema::load_epoch_changes::<Block, Client>(&*client, config.genesis_config())?;
let mut epoch_changes = epoch_changes.shared_data();

if number == Zero::zero() {
// Special case, no epoch changes data were present on genesis.
*epoch_changes = EpochChangesFor::<Block, Epoch>::default();
} else {
epoch_changes.revert(descendent_query(&*client), hash, number);
}

// Remove block weights added after the revert point.

let mut weight_keys = HashSet::with_capacity(revertible.saturated_into());
let leaves = backend.blockchain().leaves()?.into_iter().filter(|&leaf| {
sp_blockchain::tree_route(&*client, hash, leaf)
.map(|route| route.retracted().is_empty())
.unwrap_or_default()
});
for leaf in leaves {
let mut hash = leaf;
// Keep inserting parents until we hit an already processed branch or we
// reach a direct child of the revert point.
while weight_keys.insert(aux_schema::block_weight_key(hash)) {
let meta = client.header_metadata(hash)?;
if meta.number <= number + One::one() {
// We've reached a child of the revert point, stop here.
break
}
hash = client.header_metadata(hash)?.parent;
}
}
let weight_keys: Vec<_> = weight_keys.iter().map(|val| val.as_slice()).collect();

// Write epoch changes and remove weights in one shot.
aux_schema::write_epoch_changes::<Block, _, _>(&epoch_changes, |values| {
client.insert_aux(values, weight_keys.iter())
})
}
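
The revert is deliberately clamped to the non-finalized region (`revertible = blocks.min(best_number - finalized)`), and when the target is genesis the epoch-changes tree is simply reset rather than filtered. A small worked example using the numbers from the new test below; illustrative only, not part of the commit:

fn main() {
	// Chain state from the test scenario: best block #21, nothing finalized.
	let (best, finalized) = (21u64, 0u64);

	// `revert(client, backend, 11)` clamps the request to the non-finalized
	// range and lands on block #10, the revert target used by the test.
	let revertible = 11u64.min(best - finalized);
	assert_eq!(best - revertible, 10);

	// Asking for more than is revertible only rolls back to the last
	// finalized block; at block #0 (genesis) the epoch-changes tree is reset
	// to its default instead of being pruned node by node.
	let revertible = 1_000u64.min(best - finalized);
	assert_eq!(best - revertible, 0);
}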
82 changes: 82 additions & 0 deletions client/consensus/babe/src/tests.rs
@@ -735,6 +735,88 @@ fn importing_block_one_sets_genesis_epoch() {
assert_eq!(epoch_for_second_block, genesis_epoch);
}

#[test]
fn revert_prunes_epoch_changes_and_removes_weights() {
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let backend = peer.client().as_backend();
let mut block_import = data.block_import.lock().take().expect("import set up during init");
let epoch_changes = data.link.epoch_changes.clone();

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};

let mut propose_and_import_blocks_wrap = |parent_id, n| {
propose_and_import_blocks(&client, &mut proposer_factory, &mut block_import, parent_id, n)
};

// Test scenario.
// Information for the epoch starting at block #19 is produced on three different forks at block #13.
// One branch starts before the revert point (epoch data should be maintained).
// One branch starts after the revert point (epoch data should be removed).
//
//                        *----------------- F(#13) --#18                    < fork #2
//                       /
// A(#1) ---- B(#7) ----#8----+-----#12----- C(#13) ---- D(#19) ------#21    < canon
//   \                        ^        \
//    \                    revert       *---- G(#13) ---- H(#19) ---#20      < fork #3
//     \                   to #10
//      *-----E(#7)---#11                                                    < fork #1
let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 21);
let fork1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10);
let fork2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[7]), 10);
let fork3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[11]), 8);

// We should be tracking a total of 9 epochs in the fork tree, stored in 8 tree
// nodes (the genesis node alone accounts for epochs 0 and 1)
assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8);
// And only one root
assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1);

// Revert canon chain to block #10 (best(21) - 11)
revert(client.clone(), backend, 11).expect("revert should work for baked test scenario");

// Load and check epoch changes.

let actual_nodes = aux_schema::load_epoch_changes::<Block, TestClient>(
&*client,
data.link.config.genesis_config(),
)
.expect("load epoch changes")
.shared_data()
.tree()
.iter()
.map(|(h, _, _)| *h)
.collect::<Vec<_>>();

let expected_nodes = vec![
canon[0], // A
canon[6], // B
fork2[4], // F
fork1[5], // E
];

assert_eq!(actual_nodes, expected_nodes);

let weight_data_check = |hashes: &[Hash], expected: bool| {
hashes.iter().all(|hash| {
aux_schema::load_block_weight(&*client, hash).unwrap().is_some() == expected
})
};
assert!(weight_data_check(&canon[..10], true));
assert!(weight_data_check(&canon[10..], false));
assert!(weight_data_check(&fork1, true));
assert!(weight_data_check(&fork2, true));
assert!(weight_data_check(&fork3, false));
}

#[test]
fn importing_epoch_change_block_prunes_tree() {
let mut net = BabeTestNet::new(1);
51 changes: 41 additions & 10 deletions client/consensus/epochs/src/lib.rs
@@ -21,7 +21,7 @@
pub mod migration;

use codec::{Decode, Encode};
use fork_tree::ForkTree;
use fork_tree::{FilterAction, ForkTree};
use sc_client_api::utils::is_descendent_of;
use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata};
use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero};
@@ -660,15 +660,6 @@
parent_number: Number,
slot: E::Slot,
) -> Result<Option<ViableEpochDescriptor<Hash, Number, E>>, fork_tree::Error<D::Error>> {
// find_node_where will give you the node in the fork-tree which is an ancestor
// of the `parent_hash` by default. if the last epoch was signalled at the parent_hash,
// then it won't be returned. we need to create a new fake chain head hash which
// "descends" from our parent-hash.
let fake_head_hash = fake_head_hash(parent_hash);

let is_descendent_of =
descendent_of_builder.build_is_descendent_of(Some((fake_head_hash, *parent_hash)));

if parent_number == Zero::zero() {
// need to insert the genesis epoch.
return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot)))
@@ -683,6 +674,15 @@
}
}

// find_node_where will give you the node in the fork-tree which is an ancestor
// of the `parent_hash` by default. if the last epoch was signalled at the parent_hash,
// then it won't be returned. we need to create a new fake chain head hash which
// "descends" from our parent-hash.
let fake_head_hash = fake_head_hash(parent_hash);

let is_descendent_of =
descendent_of_builder.build_is_descendent_of(Some((fake_head_hash, *parent_hash)));

// We want to find the deepest node in the tree which is an ancestor
// of our block and where the start slot of the epoch was before the
// slot of our block. The genesis special-case doesn't need to look
@@ -798,6 +798,37 @@ where
});
self.epochs.insert((hash, number), persisted);
}

/// Revert to a specified block given its `hash` and `number`.
/// This removes all the epoch-change information announced by blocks that are
/// descendants of the given block.
pub fn revert<D: IsDescendentOfBuilder<Hash>>(
&mut self,
descendent_of_builder: D,
hash: Hash,
number: Number,
) {
let is_descendent_of = descendent_of_builder.build_is_descendent_of(None);

let filter = |node_hash: &Hash, node_num: &Number, _: &PersistedEpochHeader<E>| {
if number >= *node_num &&
(is_descendent_of(node_hash, &hash).unwrap_or_default() || *node_hash == hash)
{
// Continue the search in this subtree.
FilterAction::KeepNode
} else if number < *node_num && is_descendent_of(&hash, node_hash).unwrap_or_default() {
// Found a node to be removed.
FilterAction::Remove
} else {
// Not an ancestor or descendant of the revert point; stop processing this branch.
FilterAction::KeepTree
}
};

self.inner.drain_filter(filter).for_each(|(h, n, _)| {
self.epochs.remove(&(h, n));
});
}
}

/// Type alias to produce the epoch-changes tree from a block type.
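
Read against the BABE test scenario above (revert target: canon block #10), the filter classifies each epoch-change node as follows. A sketch reusing the node labels from the diagram; `epoch_changes`, `client`, `hash` and `number` are the variables of BABE's `revert` above, and the call shown is the one that function makes:

// hash = canon block #10, number = 10 (the revert target).
//
// A(#1), B(#7)    : number >= node_num and #10 descends from them -> KeepNode
//                   (keep the node and keep scanning its children)
// C(#13), D(#19)  : node_num > number and they descend from #10   -> Remove
// G(#13), H(#19)  : fork #3 branches off canon #12, so they also
//                   descend from #10                              -> Remove
// E(#7), fork #1  : node_num <= number, but #10 does not descend
//                   from E                                        -> KeepTree
// F(#13), fork #2 : node_num > number, but fork #2 branches off
//                   canon #8, so F does not descend from #10      -> KeepTree
//
// Exactly A, B, E and F survive, matching `expected_nodes` in the test.
epoch_changes.revert(descendent_query(&*client), hash, number);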