(&target_client).await?;
+ if current_number != expected_number {
+ return Err(anyhow::format_err!(
+ "Transaction has failed to update best {} header at {} to {}. It is {}",
+ Source::NAME,
+ Target::NAME,
+ expected_number,
+ current_number,
+ ))
+ }
+
+ // verify that the pallet is still halted (or operational if it is the last batch)
+ ensure_pallet_operating_mode(&finality_target, is_last_batch).await?;
+ }
+
+ if let Some(latest_andatory_header_number) = latest_andatory_header_number {
+ log::info!(
+ target: "bridge",
+ "Successfully updated best {} header at {} to {}. Pallet is now operational",
+ Source::NAME,
+ Target::NAME,
+ latest_andatory_header_number,
+ );
+ }
+
+ Ok(())
+ })
+ }
+}
+
+/// Mandatory header and its finality proof.
+type HeaderAndProof = (
+ SyncHeader::SourceChain>>,
+ GrandpaJustification::SourceChain>>,
+);
+/// Vector of mandatory headers and their finality proofs.
+type HeadersAndProofs = Vec>;
+
+/// Returns best finalized source header number known to the bridge GRANDPA pallet at the target
+/// chain.
+///
+/// This function works even if bridge GRANDPA pallet at the target chain is halted.
+async fn best_source_block_number_at_target(
+ target_client: &Client,
+) -> anyhow::Result> {
+ Ok(read_client_state::(
+ &target_client,
+ None,
+ P::SourceChain::BEST_FINALIZED_HEADER_ID_METHOD,
+ )
+ .await?
+ .best_finalized_peer_at_best_self
+ .0)
+}
+
+/// Verify that the bridge GRANDPA pallet at the target chain is either halted, or operational.
+async fn ensure_pallet_operating_mode(
+ finality_target: &SubstrateFinalityTarget,
+ operational: bool,
+) -> anyhow::Result<()> {
+ match (operational, finality_target.ensure_pallet_active().await) {
+ (true, Ok(())) => Ok(()),
+ (false, Err(SubstrateError::BridgePalletIsHalted)) => Ok(()),
+ _ =>
+ return Err(anyhow::format_err!(
+ "Bridge GRANDPA pallet at {} is expected to be {}, but it isn't",
+ P::TargetChain::NAME,
+ if operational { "operational" } else { "halted" },
+ )),
+ }
+}
+
+/// Returns list of all mandatory headers in given range.
+async fn find_mandatory_headers_in_range(
+ finality_source: &SubstrateFinalitySource,
+ range: (BlockNumberOf, BlockNumberOf),
+) -> anyhow::Result> {
+ let mut mandatory_headers = Vec::new();
+ let mut current = range.0;
+ while current <= range.1 {
+ let (header, proof) = finality_source.header_and_finality_proof(current).await?.into();
+ if header.is_mandatory() {
+ match proof {
+ Some(proof) => mandatory_headers.push((header, proof)),
+ None =>
+ return Err(anyhow::format_err!(
+ "Missing GRANDPA justification for {} header {}",
+ P::SourceChain::NAME,
+ current,
+ )),
+ }
+ }
+
+ current += One::one();
+ }
+
+ Ok(mandatory_headers)
+}
+
+/// Given list of mandatory headers, prepare batches of headers, so that every batch may fit into
+/// single transaction.
+fn make_mandatory_headers_batches<
+ P: SubstrateFinalitySyncPipeline,
+ F: Fn(&HeaderAndProof) -> Weight,
+>(
+ mut headers_to_submit: HeadersAndProofs
,
+ submit_header_weight: F,
+) -> Vec> {
+ // now that we have all mandatory headers, let's prepare transactions
+ // (let's keep all our transactions below 2/3 of max tx size/weight to have some reserve
+ // for utility overhead + for halting transaction)
+ let maximal_tx_size = P::TargetChain::max_extrinsic_size() * 2 / 3;
+ let maximal_tx_weight = P::TargetChain::max_extrinsic_weight() * 2 / 3;
+ let mut current_batch_size: u32 = 0;
+ let mut current_batch_weight: Weight = 0;
+ let mut batches = Vec::new();
+ let mut i = 0;
+ while i < headers_to_submit.len() {
+ let header_and_proof_size =
+ headers_to_submit[i].0.encode().len() + headers_to_submit[i].1.encode().len();
+ let header_and_proof_weight = submit_header_weight(&headers_to_submit[i]);
+
+ let new_batch_size = current_batch_size
+ .saturating_add(u32::try_from(header_and_proof_size).unwrap_or(u32::MAX));
+ let new_batch_weight = current_batch_weight.saturating_add(header_and_proof_weight);
+
+ let is_exceeding_tx_size = new_batch_size > maximal_tx_size;
+ let is_exceeding_tx_weight = new_batch_weight > maximal_tx_weight;
+ let is_new_batch_required = is_exceeding_tx_size || is_exceeding_tx_weight;
+
+ if is_new_batch_required {
+ // if `i` is 0 and we're here, it is a weird situation: even single header submission is
+ // larger than we've planned for a bunch of headers. Let's be optimistic and hope that
+ // the tx will still succeed.
+ let spit_off_index = std::cmp::max(i, 1);
+ let remaining_headers_to_submit = headers_to_submit.split_off(spit_off_index);
+ batches.push(headers_to_submit);
+
+ // we'll reiterate the same header again => so set `current_*` to zero
+ current_batch_size = 0;
+ current_batch_weight = 0;
+ headers_to_submit = remaining_headers_to_submit;
+ i = 0;
+ } else {
+ current_batch_size = new_batch_size;
+ current_batch_weight = new_batch_weight;
+ i = i + 1;
+ }
+ }
+ if !headers_to_submit.is_empty() {
+ batches.push(headers_to_submit);
+ }
+ batches
+}
+
#[cfg(test)]
mod tests {
	use super::*;
	use crate::cli::{RuntimeVersionType, SourceRuntimeVersionParams, TargetRuntimeVersionParams};
	use bp_test_utils::{make_default_justification, test_header};
	use relay_polkadot_client::Polkadot;
	use sp_runtime::{traits::Header as _, DigestItem};

	// NOTE(review): the pipeline type in the turbofishes below was lost during extraction;
	// `KusamaFinalityToPolkadot` is inferred from the Kusama->Polkadot fixtures — confirm
	// against upstream.

	/// Build a Kusama test header (with justification) padded to roughly `size` encoded bytes.
	fn make_header_and_justification(
		i: u32,
		size: u32,
	) -> (SyncHeader<bp_kusama::Header>, GrandpaJustification<bp_kusama::Header>) {
		let size = size as usize;
		let mut header: bp_kusama::Header = test_header(i);
		let justification = make_default_justification(&header);
		let actual_size = header.encode().len() + justification.encode().len();
		// additional digest means some additional bytes, so let's decrease `additional_digest_size`
		// a bit
		let additional_digest_size = size.saturating_sub(actual_size).saturating_sub(100);
		header.digest_mut().push(DigestItem::Other(vec![0u8; additional_digest_size]));
		let justification = make_default_justification(&header);
		println!("{} {}", size, header.encode().len() + justification.encode().len());
		(header.into(), justification)
	}

	#[test]
	fn should_parse_cli_options() {
		// when
		let res = ReinitBridge::from_iter(vec![
			"reinit-bridge",
			"kusama-to-polkadot",
			"--source-host",
			"127.0.0.1",
			"--source-port",
			"42",
			"--target-host",
			"127.0.0.1",
			"--target-port",
			"43",
			"--target-signer",
			"//Alice",
		]);

		// then
		assert_eq!(
			res,
			ReinitBridge {
				bridge: ReinitBridgeName::KusamaToPolkadot,
				source: SourceConnectionParams {
					source_host: "127.0.0.1".into(),
					source_port: 42,
					source_secure: false,
					source_runtime_version: SourceRuntimeVersionParams {
						source_version_mode: RuntimeVersionType::Bundle,
						source_spec_version: None,
						source_transaction_version: None,
					}
				},
				target: TargetConnectionParams {
					target_host: "127.0.0.1".into(),
					target_port: 43,
					target_secure: false,
					target_runtime_version: TargetRuntimeVersionParams {
						target_version_mode: RuntimeVersionType::Bundle,
						target_spec_version: None,
						target_transaction_version: None,
					}
				},
				target_sign: TargetSigningParams {
					target_signer: Some("//Alice".into()),
					target_signer_password: None,
					target_signer_file: None,
					target_signer_password_file: None,
					target_transactions_mortality: None,
				},
			}
		);
	}

	#[test]
	fn make_mandatory_headers_batches_and_empty_headers() {
		let batches = make_mandatory_headers_batches::<KusamaFinalityToPolkadot, _>(vec![], |_| 0);
		assert!(batches.is_empty());
	}

	#[test]
	fn make_mandatory_headers_batches_with_single_batch() {
		let headers_to_submit =
			vec![make_header_and_justification(10, Polkadot::max_extrinsic_size() / 3)];
		let batches =
			make_mandatory_headers_batches::<KusamaFinalityToPolkadot, _>(headers_to_submit, |_| 0);
		assert_eq!(batches.into_iter().map(|x| x.len()).collect::<Vec<_>>(), vec![1],);
	}

	#[test]
	fn make_mandatory_headers_batches_group_by_size() {
		let headers_to_submit = vec![
			make_header_and_justification(10, Polkadot::max_extrinsic_size() / 3),
			make_header_and_justification(20, Polkadot::max_extrinsic_size() / 3),
			make_header_and_justification(30, Polkadot::max_extrinsic_size() * 2 / 3),
			make_header_and_justification(40, Polkadot::max_extrinsic_size()),
		];
		let batches =
			make_mandatory_headers_batches::<KusamaFinalityToPolkadot, _>(headers_to_submit, |_| 0);
		assert_eq!(batches.into_iter().map(|x| x.len()).collect::<Vec<_>>(), vec![2, 1, 1],);
	}

	#[test]
	fn make_mandatory_headers_batches_group_by_weight() {
		let headers_to_submit = vec![
			make_header_and_justification(10, 0),
			make_header_and_justification(20, 0),
			make_header_and_justification(30, 0),
			make_header_and_justification(40, 0),
		];
		let batches = make_mandatory_headers_batches::<KusamaFinalityToPolkadot, _>(
			headers_to_submit,
			|(header, _)| {
				if header.number() == 10 || header.number() == 20 {
					Polkadot::max_extrinsic_weight() / 3
				} else if header.number() == 30 {
					Polkadot::max_extrinsic_weight() * 2 / 3
				} else {
					Polkadot::max_extrinsic_weight()
				}
			},
		);
		assert_eq!(batches.into_iter().map(|x| x.len()).collect::<Vec<_>>(), vec![2, 1, 1],);
	}
}
diff --git a/bridges/relays/client-kusama/src/runtime.rs b/bridges/relays/client-kusama/src/runtime.rs
index 6d0ab5462d7c8..59a919e6cb971 100644
--- a/bridges/relays/client-kusama/src/runtime.rs
+++ b/bridges/relays/client-kusama/src/runtime.rs
@@ -70,6 +70,9 @@ pub enum Call {
/// Balances pallet.
#[codec(index = 4)]
Balances(BalancesCall),
+ /// Utility pallet.
+ #[codec(index = 24)]
+ Utility(UtilityCall),
/// Polkadot bridge pallet.
#[codec(index = 110)]
BridgePolkadotGrandpa(BridgePolkadotGrandpaCall),
@@ -102,6 +105,8 @@ pub enum BridgePolkadotGrandpaCall {
),
#[codec(index = 1)]
initialize(bp_header_chain::InitializationData<::Header>),
+ #[codec(index = 3)]
+ set_operational(bool),
}
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
@@ -136,6 +141,13 @@ pub enum BridgePolkadotMessagesCall {
),
}
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum UtilityCall {
+ #[codec(index = 2)]
+ batch_all(Vec),
+}
+
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
pub enum BridgePolkadotMessagesParameter {
#[codec(index = 0)]
diff --git a/bridges/relays/client-polkadot/src/runtime.rs b/bridges/relays/client-polkadot/src/runtime.rs
index 8b125a37843c8..fa45115a6b5c2 100644
--- a/bridges/relays/client-polkadot/src/runtime.rs
+++ b/bridges/relays/client-polkadot/src/runtime.rs
@@ -70,6 +70,9 @@ pub enum Call {
/// Balances pallet.
#[codec(index = 5)]
Balances(BalancesCall),
+ /// Utility pallet.
+ #[codec(index = 26)]
+ Utility(UtilityCall),
/// Kusama bridge pallet.
#[codec(index = 110)]
BridgeKusamaGrandpa(BridgeKusamaGrandpaCall),
@@ -102,6 +105,8 @@ pub enum BridgeKusamaGrandpaCall {
),
#[codec(index = 1)]
initialize(bp_header_chain::InitializationData<::Header>),
+ #[codec(index = 3)]
+ set_operational(bool),
}
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
@@ -136,6 +141,13 @@ pub enum BridgeKusamaMessagesCall {
),
}
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum UtilityCall {
+ #[codec(index = 2)]
+ batch_all(Vec),
+}
+
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
pub enum BridgeKusamaMessagesParameter {
#[codec(index = 0)]
diff --git a/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs b/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
index 84fb8661016b0..3daf8d11440eb 100644
--- a/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
+++ b/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
@@ -65,7 +65,7 @@ pub trait SubstrateFinalitySyncPipeline: 'static + Clone + Debug + Send + Sync {
/// Adapter that allows all `SubstrateFinalitySyncPipeline` to act as `FinalitySyncPipeline`.
#[derive(Clone, Debug)]
-pub(crate) struct FinalitySyncPipelineAdapter {
+pub struct FinalitySyncPipelineAdapter {
_phantom: PhantomData,
}
diff --git a/bridges/relays/lib-substrate-relay/src/finality_target.rs b/bridges/relays/lib-substrate-relay/src/finality_target.rs
index b7bc90cb4de79..4c5814171049a 100644
--- a/bridges/relays/lib-substrate-relay/src/finality_target.rs
+++ b/bridges/relays/lib-substrate-relay/src/finality_target.rs
@@ -52,7 +52,7 @@ impl SubstrateFinalityTarget {
}
/// Ensure that the GRANDPA pallet at target chain is active.
- async fn ensure_pallet_active(&self) -> Result<(), Error> {
+ pub async fn ensure_pallet_active(&self) -> Result<(), Error> {
let is_halted = self
.client
.storage_value(is_halted_key(P::SourceChain::WITH_CHAIN_GRANDPA_PALLET_NAME), None)