diff --git a/node/cli/src/command.rs b/node/cli/src/command.rs index 9a44cd960..ec2445401 100644 --- a/node/cli/src/command.rs +++ b/node/cli/src/command.rs @@ -74,6 +74,8 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(service::chain_spec::bifrost_polkadot::local_testnet_config()), #[cfg(any(feature = "with-bifrost-polkadot-runtime", feature = "with-bifrost-runtime"))] "bifrost-paseo" => Box::new(service::chain_spec::bifrost_polkadot::paseo_config()), + #[cfg(any(feature = "with-bifrost-polkadot-runtime", feature = "with-bifrost-runtime"))] + "bifrost-polkadot-dev" => Box::new(service::chain_spec::bifrost_polkadot::dev_config()), path => { let path = std::path::PathBuf::from(path); if path.to_str().map(|s| s.contains("bifrost-polkadot")) == Some(true) { @@ -193,7 +195,7 @@ macro_rules! with_runtime_or_err { #[cfg(not(any(feature = "with-bifrost-kusama-runtime",feature = "with-bifrost-runtime")))] return Err(service::BIFROST_KUSAMA_RUNTIME_NOT_AVAILABLE.into()); - } else if $chain_spec.is_bifrost_polkadot() { + } else if $chain_spec.is_bifrost_polkadot() || $chain_spec.is_dev() { #[cfg(any(feature = "with-bifrost-polkadot-runtime", feature = "with-bifrost-runtime"))] #[allow(unused_imports)] use service::collator_polkadot::{bifrost_polkadot_runtime::{Block, RuntimeApi}, start_node,new_partial}; @@ -449,6 +451,7 @@ pub fn run() -> Result<()> { info!("Parachain id: {:?}", id); info!("Parachain Account: {}", parachain_account); info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); + info!("Is dev mode: {}", if config.chain_spec.is_dev() { "yes" } else { "no" }); with_runtime_or_err!(config.chain_spec, { { diff --git a/node/service/src/chain_spec/bifrost_polkadot.rs b/node/service/src/chain_spec/bifrost_polkadot.rs index 6618aa5b7..ee486a4f3 100644 --- a/node/service/src/chain_spec/bifrost_polkadot.rs +++ b/node/service/src/chain_spec/bifrost_polkadot.rs @@ -256,6 +256,115 @@ pub fn local_testnet_config() -> ChainSpec { 
.build() } +pub fn dev_config() -> ChainSpec { + let endowed_accounts = vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + whitelisted_caller(), // Benchmarking whitelist_account + account("bechmarking_account_1", 0, 0), + ]; + let balances = endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT())).collect(); + let tokens = endowed_accounts + .iter() + .flat_map(|x| { + vec![ + (x.clone(), DOT, ENDOWMENT() * 4_000_000), + (x.clone(), WETH, ENDOWMENT() * 4_000_000), + ] + }) + .collect(); + let council_membership = vec![get_account_id_from_seed::("Alice")]; + let technical_committee_membership = vec![get_account_id_from_seed::("Alice")]; + let oracle_membership = vec![get_account_id_from_seed::("Alice")]; + let salp_multisig: AccountId = + hex!["49daa32c7287890f38b7e1a8cd2961723d36d20baa0bf3b82e0c4bdda93b1c0a"].into(); + let currency = vec![ + ( + BNC, + 10_000_000_000, + Some((String::from("Bifrost Native Coin"), String::from("BNC"), 12u8)), + ), + (DOT, 1_000_000, Some((String::from("Polkadot DOT"), String::from("DOT"), 10u8))), + ( + GLMR, + 1_000_000_000_000, + Some((String::from("Moonbeam Native Token"), String::from("GLMR"), 18u8)), + ), + (DOT_U, 1_000, Some((String::from("Tether USD"), String::from("USDT"), 6u8))), + (ASTR, 10_000_000_000_000_000, Some((String::from("Astar"), String::from("ASTR"), 18u8))), + ( + FIL, + 1_000_000_000_000, + Some((String::from("Filecoin Network Token"), String::from("FIL"), 18u8)), + ), + (USDC, 1_000, Some((String::from("USD Coin"), String::from("USDC"), 6u8))), + (IBTC, 100, Some((String::from("interBTC"), String::from("IBTC"), 8u8))), + (INTR, 10_000_000, Some((String::from("Interlay"), String::from("INTR"), 10u8))), + ( + MANTA, + 10_000_000_000_000, + Some((String::from("Manta Network"), String::from("MANTA"), 18u8)), + ), + ( + BNCS, 
+ 10_000_000_000, + Some((String::from("bncs-20 inscription token BNCS"), String::from("BNCS"), 12u8)), + ), + (PINK, 100_000_000, Some((String::from("PINK"), String::from("PINK"), 10u8))), + (DED, 1, Some((String::from("DED"), String::from("DED"), 10u8))), + (PEN, 100_000_000, Some((String::from("Pendulum"), String::from("PEN"), 12u8))), + (WETH, 100_000_000, Some((String::from("SnowBridge WETH"), String::from("SWETH"), 18u8))), + ]; + let vcurrency = vec![VSToken2(DOT_TOKEN_ID), VToken(TokenSymbol::BNC), VToken2(DOT_TOKEN_ID)]; + + let mut evm_accounts = BTreeMap::new(); + evm_accounts.insert( + // H160 address of CI test runner account + H160::from_str("6be02d1d3665660d22ff9624b7be0551ee1ac91b") + .expect("internal H160 is valid; qed"), + fp_evm::GenesisAccount { + balance: U256::from(1_000_000_000_000_000_000_000_000u128), + code: Default::default(), + nonce: Default::default(), + storage: Default::default(), + }, + ); + + ChainSpec::builder( + bifrost_polkadot_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + RelayExtensions { relay_chain: "polkadot".into(), para_id: PARA_ID, evm_since: 1 }, + ) + .with_name("Bifrost Polkadot Dev Testnet") + .with_id("dev") + .with_chain_type(ChainType::Development) + .with_genesis_config_patch(bifrost_polkadot_genesis( + vec![ + ( + get_account_id_from_seed::("Alice"), + get_from_seed::("Alice"), + ), + (get_account_id_from_seed::("Bob"), get_from_seed::("Bob")), + ], + balances, + vec![], + PARA_ID.into(), + tokens, + council_membership, + technical_committee_membership, + salp_multisig, + (currency, vcurrency, vec![]), + oracle_membership, + evm_accounts, + )) + .with_properties(bifrost_polkadot_properties()) + .with_protocol_id(DEFAULT_PROTOCOL_ID) + .build() +} + pub fn paseo_config() -> ChainSpec { let invulnerables: Vec<(AccountId, AuraId)> = vec![ ( diff --git a/node/service/src/collator_polkadot.rs b/node/service/src/collator_polkadot.rs index 22db81766..5bbe2e7b5 100644 --- 
a/node/service/src/collator_polkadot.rs +++ b/node/service/src/collator_polkadot.rs @@ -35,7 +35,7 @@ use cumulus_client_consensus_aura::collators::basic::{ use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport; use cumulus_client_consensus_proposer::Proposer; -use crate::{chain_spec, eth}; +use crate::{chain_spec, eth, IdentifyVariant}; use bifrost_primitives::Block; use cumulus_client_service::{ build_network, build_relay_chain_interface, prepare_node_config, start_relay_chain_tasks, @@ -557,14 +557,18 @@ pub async fn start_node>( para_id: ParaId, hwbench: Option, ) -> sc_service::error::Result<(TaskManager, Arc)> { - start_node_impl::( - parachain_config, - polkadot_config, - eth_config, - collator_options, - CollatorSybilResistance::Resistant, - para_id, - hwbench, - ) - .await + if parachain_config.chain_spec.is_dev() { + crate::dev::start_node::(parachain_config, eth_config).await + } else { + start_node_impl::( + parachain_config, + polkadot_config, + eth_config, + collator_options, + CollatorSybilResistance::Resistant, + para_id, + hwbench, + ) + .await + } } diff --git a/node/service/src/dev.rs b/node/service/src/dev.rs index 5b6cbacdf..69ff11c62 100644 --- a/node/service/src/dev.rs +++ b/node/service/src/dev.rs @@ -16,23 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use bifrost_polkadot_runtime::{constants::time::SLOT_DURATION, TransactionConverter}; -use cumulus_primitives_core::{ParaId, PersistedValidationData}; -use cumulus_primitives_parachain_inherent::{ - MockValidationDataInherentDataProvider, MockXcmConfig, ParachainInherentData, -}; -use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +use std::{cell::RefCell, sync::Arc}; + +use cumulus_primitives_core::relay_chain::Hash; +use fc_storage::StorageOverrideHandler; use jsonrpsee::core::async_trait; use sc_client_api::Backend; -use sc_executor::NativeElseWasmExecutor; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_network::NetworkBackend; +use sc_service::{Configuration, TaskManager}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; -use sp_core::U256; -use std::{ - cell::RefCell, - collections::BTreeMap, - sync::{Arc, Mutex}, -}; + +use bifrost_polkadot_runtime::{constants::time::SLOT_DURATION, TransactionConverter}; use crate::{ collator_polkadot::FullClient, @@ -40,25 +34,11 @@ use crate::{ }; pub type Block = bifrost_primitives::Block; -pub type Executor = crate::collator_polkadot::BifrostPolkadotExecutor; pub type RuntimeApi = bifrost_polkadot_runtime::RuntimeApi; pub type FullBackend = sc_service::TFullBackend; pub type FullSelectChain = sc_consensus::LongestChain; -pub fn default_mock_parachain_inherent_data_provider() -> MockValidationDataInherentDataProvider { - MockValidationDataInherentDataProvider { - current_para_block: 0, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - xcm_config: Default::default(), - raw_downward_messages: vec![], - raw_horizontal_messages: vec![], - para_blocks_per_relay_epoch: 0, - relay_randomness_config: (), - } -} - thread_local!(static TIMESTAMP: RefCell = const { RefCell::new(0) }); /// Provide a mock duration starting at 0 in millisecond for timestamp inherent. 
@@ -90,25 +70,37 @@ impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider { /// Builds a new development service. This service uses manual seal, and mocks /// the parachain inherent. /// Before calling this function, you must set OnTimestampSet in runtime to be (). -pub async fn start_node( - config: Configuration, +pub async fn start_node( + parachain_config: Configuration, eth_config: EthConfiguration, -) -> sc_service::error::Result<(TaskManager, Arc)> { - let params = crate::collator_polkadot::new_partial(&config, ð_config, true)?; - let (block_import, mut telemetry, telemetry_worker_handle, frontier_backend) = params.other; +) -> sc_service::error::Result<(TaskManager, Arc)> +where + Net: NetworkBackend, +{ + let params = crate::collator_polkadot::new_partial(¶chain_config, true)?; + let ( + _block_import, + mut telemetry, + _telemetry_worker_handle, + frontier_backend, + filter_pool, + fee_history_cache, + ) = params.other; let client = params.client.clone(); let backend = params.backend.clone(); let mut task_manager = params.task_manager; - let validator = config.role.is_authority(); - let prometheus_registry = config.prometheus_registry().cloned(); let transaction_pool = params.transaction_pool.clone(); - let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); + let net_config = + sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(¶chain_config.network); + let metrics = Net::register_notification_metrics( + parachain_config.prometheus_config.as_ref().map(|cfg| &cfg.registry), + ); let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, + config: ¶chain_config, net_config, client: client.clone(), transaction_pool: transaction_pool.clone(), @@ -117,11 +109,12 @@ pub async fn start_node( block_announce_validator_builder: None, warp_sync_params: None, block_relay: None, + metrics, })?; - let 
prometheus_registry = config.prometheus_registry().cloned(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); - if config.offchain_worker.enabled { + if parachain_config.offchain_worker.enabled { use futures::FutureExt; let backend_ofc = backend.clone(); @@ -135,8 +128,8 @@ pub async fn start_node( transaction_pool: Some(OffchainTransactionPoolFactory::new( transaction_pool.clone(), )), - network_provider: network.clone(), - is_validator: config.role.is_authority(), + network_provider: Arc::new(network.clone()), + is_validator: parachain_config.role.is_authority(), enable_http_requests: false, custom_extensions: move |_| vec![], }) @@ -145,10 +138,6 @@ pub async fn start_node( ); } - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let backoff_authoring_blocks: Option<()> = None; - let select_chain = params .select_chain .expect("In `dev` mode, `new_partial` will return some `select_chain`; qed"); @@ -163,14 +152,6 @@ pub async fn start_node( // Channel for the rpc handler to communicate with the authorship task. 
let (command_sink, commands_stream) = futures::channel::mpsc::channel(1024); - - let pool = transaction_pool.pool().clone(); - - let client_for_cidp = client.clone(); - - let (downward_xcm_sender, downward_xcm_receiver) = flume::bounded::>(100); - let (hrmp_xcm_sender, hrmp_xcm_receiver) = flume::bounded::<(ParaId, Vec)>(100); - let authorship_future = sc_consensus_manual_seal::run_manual_seal(sc_consensus_manual_seal::ManualSealParams { block_import: client.clone(), @@ -180,35 +161,8 @@ pub async fn start_node( commands_stream, select_chain, consensus_data_provider: None, - create_inherent_data_providers: move |block, _| { - let current_para_block = client_for_cidp - .header(block) - .ok() - .flatten() - .expect("Header lookup should succeed") - .number; - let downward_xcm_receiver = downward_xcm_receiver.clone(); - let hrmp_xcm_receiver = hrmp_xcm_receiver.clone(); - let client_for_xcm = client_for_cidp.clone(); - async move { - let mocked_parachain = MockValidationDataInherentDataProvider { - current_para_block, - relay_offset: 1000, - relay_blocks_per_para_block: 2, - para_blocks_per_relay_epoch: 0, - relay_randomness_config: (), - xcm_config: MockXcmConfig::new( - &*client_for_xcm, - block, - Default::default(), - Default::default(), - ), - raw_downward_messages: downward_xcm_receiver.drain().collect(), - raw_horizontal_messages: hrmp_xcm_receiver.drain().collect(), - }; - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - Ok((timestamp, mocked_parachain)) - } + create_inherent_data_providers: move |_, ()| async move { + Ok(sp_timestamp::InherentDataProvider::from_system_time()) }, }); // we spawn the future on a background thread managed by service. 
@@ -218,7 +172,14 @@ pub async fn start_node( authorship_future, ); - let overrides = crate::rpc::overrides_handle(client.clone()); + let storage_override = Arc::new(StorageOverrideHandler::new(client.clone())); + let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( + task_manager.spawn_handle(), + storage_override.clone(), + eth_config.eth_log_block_cache, + eth_config.eth_statuses_cache, + prometheus_registry.clone(), + )); // Sinks for pubsub notifications. // Everytime a new subscription is created, a new mpsc channel is added to the sink pool. @@ -231,91 +192,49 @@ pub async fn start_node( > = Default::default(); let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks); - let filter_pool = Some(Arc::new(Mutex::new(BTreeMap::new()))); - let fee_history_cache = Arc::new(Mutex::new(BTreeMap::new())); - // let fee_history_cache_limit = parachain_config.fee_history_limit; - let fee_history_cache_limit = 2048; - let rpc_builder = { let client = client.clone(); - let pool = transaction_pool.clone(); + let is_authority = parachain_config.role.is_authority(); + let transaction_pool = transaction_pool.clone(); let network = network.clone(); let sync_service = sync_service.clone(); - - let is_authority = config.role.is_authority(); - let enable_dev_signer = eth_config.enable_dev_signer; - let max_past_logs = eth_config.max_past_logs; - let execute_gas_limit_multiplier = eth_config.execute_gas_limit_multiplier; - let filter_pool = filter_pool.clone(); let frontier_backend = frontier_backend.clone(); - let pubsub_notification_sinks = pubsub_notification_sinks.clone(); - let overrides = overrides.clone(); let fee_history_cache = fee_history_cache.clone(); - let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new( - task_manager.spawn_handle(), - overrides.clone(), - eth_config.eth_log_block_cache, - eth_config.eth_statuses_cache, - prometheus_registry.clone(), - )); - - let pending_create_inherent_data_providers = move |_, _| async move 
{ - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - // Create a dummy parachain inherent data provider which is required to pass - // the checks by the para chain system. We use dummy values because in the 'pending - // context' neither do we have access to the real values nor do we need them. - let (relay_parent_storage_root, relay_chain_state) = - RelayStateSproofBuilder::default().into_state_root_and_proof(); - let vfp = PersistedValidationData { - // This is a hack to make - // `cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases` happy. Relay - // parent number can't be bigger than u32::MAX. - relay_parent_number: u32::MAX, - relay_parent_storage_root, - ..Default::default() - }; - let parachain_inherent_data = ParachainInherentData { - validation_data: vfp, - relay_chain_state, - downward_messages: Default::default(), - horizontal_messages: Default::default(), - }; - Ok((timestamp, parachain_inherent_data)) - }; + let filter_pool = filter_pool.clone(); + let storage_override = storage_override.clone(); + let pubsub_notification_sinks = pubsub_notification_sinks.clone(); Box::new(move |deny_unsafe, subscription_task_executor| { + let deps = crate::rpc::FullDepsPolkadot { + client: client.clone(), + pool: transaction_pool.clone(), + deny_unsafe, + command_sink: Some(command_sink.clone()), + }; + let module = crate::rpc::create_full_polkadot(deps)?; + let eth_deps = crate::rpc::EthDeps { client: client.clone(), - pool: pool.clone(), - graph: pool.pool().clone(), + pool: transaction_pool.clone(), + graph: transaction_pool.pool().clone(), converter: Some(TransactionConverter), is_authority, - enable_dev_signer, + enable_dev_signer: eth_config.enable_dev_signer, network: network.clone(), - sync: sync_service.clone(), - frontier_backend: match frontier_backend.clone() { - fc_db::Backend::KeyValue(b) => Arc::new(b), - fc_db::Backend::Sql(b) => Arc::new(b), - }, - overrides: overrides.clone(), + sync_service: sync_service.clone(), + 
frontier_backend: frontier_backend.clone(), + storage_override: storage_override.clone(), block_data_cache: block_data_cache.clone(), filter_pool: filter_pool.clone(), - max_past_logs, + max_past_logs: eth_config.max_past_logs, fee_history_cache: fee_history_cache.clone(), - fee_history_cache_limit, - execute_gas_limit_multiplier, - forced_parent_hashes: None, - pending_create_inherent_data_providers, + fee_history_cache_limit: eth_config.fee_history_limit, + execute_gas_limit_multiplier: eth_config.execute_gas_limit_multiplier, }; - let deps = crate::rpc::FullDepsPolkadot { - client: client.clone(), - pool: pool.clone(), - deny_unsafe, - command_sink: Some(command_sink.clone()), - eth: eth_deps, - }; - crate::rpc::create_full_polkadot( - deps, + + crate::rpc::create_eth( + module, + eth_deps, subscription_task_executor, pubsub_notification_sinks.clone(), ) @@ -328,7 +247,7 @@ pub async fn start_node( client: client.clone(), transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, - config, + config: parachain_config, keystore: params.keystore_container.keystore(), backend: backend.clone(), network: network.clone(), @@ -337,21 +256,20 @@ pub async fn start_node( tx_handler_controller, telemetry: telemetry.as_mut(), })?; - start_network.start_network(); - spawn_frontier_tasks( &task_manager, client.clone(), backend, frontier_backend, filter_pool, - overrides, + storage_override, fee_history_cache, - fee_history_cache_limit, + eth_config.fee_history_limit, sync_service.clone(), pubsub_notification_sinks, - ) - .await; + ); + + start_network.start_network(); Ok((task_manager, client)) } diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 22fc5ab2f..dbf35cc57 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -25,7 +25,7 @@ pub mod collator_kusama; pub mod collator_polkadot; pub mod eth; pub use bifrost_rpc as rpc; -// pub mod dev; +pub mod dev; /// Can be called for a `Configuration` to check if it is a 
configuration for the `Bifrost` network. pub trait IdentifyVariant {