diff --git a/Cargo.lock b/Cargo.lock index bd6bcff83fd57..f0e0e4e97b054 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4274,8 +4274,10 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "parity-scale-codec", + "sc-consensus", "sc-executor", "sp-application-crypto", + "sp-consensus", "sp-consensus-babe", "sp-core", "sp-externalities", @@ -4542,6 +4544,7 @@ dependencies = [ "sc-cli", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-service", "sp-api", @@ -7086,6 +7089,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-proposer-metrics", "sc-telemetry", "sc-transaction-pool", @@ -7260,11 +7264,24 @@ name = "sc-consensus" version = "0.10.0-dev" dependencies = [ "async-trait", + "futures 0.3.15", + "futures-timer 3.0.2", + "libp2p", + "log", "parking_lot 0.11.1", "sc-client-api", + "serde", + "sp-api", "sp-blockchain", "sp-consensus", + "sp-core", "sp-runtime", + "sp-state-machine", + "sp-test-primitives", + "sp-utils", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", ] [[package]] @@ -7281,6 +7298,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-slots", "sc-executor", "sc-keystore", @@ -7331,6 +7349,7 @@ dependencies = [ "retain_mut", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", "sc-consensus-uncles", @@ -7422,6 +7441,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-basic-authorship", "sc-client-api", + "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", "sc-transaction-pool", @@ -7457,6 +7477,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "sc-client-api", + "sc-consensus", "sp-api", "sp-block-builder", "sp-blockchain", @@ -7479,6 +7500,7 @@ dependencies = [ "log", "parity-scale-codec", "sc-client-api", + "sc-consensus", "sc-telemetry", "sp-api", "sp-application-crypto", @@ -7695,6 +7717,7 @@ dependencies = [ "rand 0.8.3", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-finality-grandpa", "sc-network", "sc-service", @@ -7797,6 +7820,7 @@ dependencies = [ "rand 0.7.3", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-peerset", "serde", "serde_json", @@ -7889,6 +7913,7 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-keystore", "sc-network", "sc-transaction-pool", @@ -7954,6 +7979,7 @@ dependencies = [ "serde_json", "sp-api", "sp-blockchain", + "sp-consensus", "sp-core", "sp-io", "sp-keystore", @@ -8048,6 +8074,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-finality-grandpa", "sc-informant", @@ -8110,6 +8137,7 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-light", "sc-network", @@ -8248,6 +8276,7 @@ dependencies = [ "retain_mut", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-transaction-pool-api", "serde", "sp-api", @@ -8847,7 +8876,6 @@ dependencies = [ "async-trait", "futures 0.3.15", "futures-timer 3.0.2", - "libp2p", "log", "parity-scale-codec", "parking_lot 0.11.1", @@ -9762,12 +9790,14 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "sc-block-builder", + "sc-consensus", "sc-executor", "sc-service", "serde", "sp-api", "sp-application-crypto", "sp-block-builder", + "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", "sp-core", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 
c19824e9eaa38..c4f23ac10d93c 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -27,7 +27,7 @@ type FullSelectChain = sc_consensus::LongestChain; pub fn new_partial(config: &Configuration) -> Result, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( sc_finality_grandpa::GrandpaBlockImport, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 4886b798b050c..00d412aea39f5 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -46,7 +46,7 @@ pub fn new_partial( config: &Configuration, ) -> Result, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( impl Fn( @@ -605,9 +605,8 @@ mod tests { use std::{sync::Arc, borrow::Cow, convert::TryInto}; use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; - use sp_consensus::{ - Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, - }; + use sp_consensus::{BlockOrigin, Environment, Proposer}; + use sc_consensus::{BlockImportParams, ForkChoiceStrategy, BlockImport,}; use node_primitives::{Block, DigestItem, Signature}; use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 5b0617d6af8e0..47ce4d3f3483f 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -40,6 +40,8 @@ pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/tra pallet-treasury = { version = "4.0.0-dev", path = "../../../frame/treasury" } sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-externalities = { version = "0.10.0-dev", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 4e17366795909..6ba2bae3eb317 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -836,8 +836,8 @@ fn full_wasm_block_import_works_with_changes_trie() { fn should_import_block_with_test_client() { use node_testing::client::{ ClientBlockImportExt, TestClientBuilderExt, TestClientBuilder, - sp_consensus::BlockOrigin, }; + use sp_consensus::BlockOrigin; let mut client = TestClientBuilder::new().build(); let block1 = changes_trie_block(); diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index e2a4555e6797c..656f9331c5af9 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -17,6 +17,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } sc-service = { version = "0.10.0-dev", features = ["test-helpers", "db"], path = "../../../client/service" } sc-client-db = { version = "0.10.0-dev", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } codec = { 
package = "parity-scale-codec", version = "2.0.0" } pallet-contracts = { version = "4.0.0-dev", path = "../../../frame/contracts" } pallet-grandpa = { version = "4.0.0-dev", path = "../../../frame/grandpa" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index edb99c617771a..c1499f207cb00 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -29,10 +29,11 @@ use crate::client::{Client, Backend}; use crate::keyring::*; use sc_client_db::PruningMode; use sc_executor::{NativeExecutor, WasmExecutionMethod}; -use sp_consensus::{ - BlockOrigin, BlockImport, BlockImportParams, +use sc_consensus::{ + BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux }; +use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, OpaqueExtrinsic, diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index b09995f887c4a..b6ef397681d50 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -41,7 +41,6 @@ use sp_consensus::BlockOrigin; use parking_lot::RwLock; pub use sp_state_machine::Backend as StateBackend; -pub use sp_consensus::ImportedState; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 469df55cf0233..4e13e0dc2e6ef 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -32,5 +32,6 @@ sc-proposer-metrics = { version = "0.9.0", path = "../proposer-metrics" } [dev-dependencies] sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } parking_lot = "0.11.1" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 590f4275bf760..aead0080265b6 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -468,7 +468,7 @@ mod tests { use super::*; use parking_lot::Mutex; - use sp_consensus::{BlockOrigin, Proposer}; + use sp_consensus::{Proposer,BlockOrigin}; use substrate_test_runtime_client::{ prelude::*, TestClientBuilder, runtime::{Extrinsic, Transfer}, TestClientBuilderExt, }; diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index f5a8aaf9dadbb..c23ad55505766 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -20,6 +20,7 @@ sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } codec = { package = "parity-scale-codec", version = "2.0.0" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } derive_more = "0.99.2" futures = "0.3.9" diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index c3faa5382686e..d5c369e6195c1 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -26,11 +26,15 @@ use log::{debug, info, trace}; use prometheus_endpoint::Registry; use codec::{Encode, Decode, Codec}; use sp_consensus::{ - BlockImport, CanAuthorWith, ForkChoiceStrategy, 
BlockImportParams, - BlockOrigin, Error as ConsensusError, + BlockOrigin, + CanAuthorWith, + Error as ConsensusError, +}; +use sc_consensus::{ + block_import::{BlockImport, ForkChoiceStrategy, BlockImportParams}, import_queue::{ Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, - }, + } }; use sc_client_api::{BlockOf, UsageProvider, backend::AuxStore}; use sp_blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, ProvideCache, HeaderBackend}; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 72545eda077ba..a7b1d06e5396e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -41,8 +41,11 @@ use log::{debug, trace}; use codec::{Encode, Decode, Codec}; use sp_consensus::{ - BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, StateAction, + BlockOrigin, Environment, Proposer, CanAuthorWith, + Error as ConsensusError, SelectChain, +}; +use sc_consensus::{ + BlockImport,StateAction,ForkChoiceStrategy, BlockImportParams }; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend}; @@ -179,7 +182,7 @@ where PF: Environment + Send + Sync + 'static, PF::Proposer: Proposer>, SO: SyncOracle + Send + Sync + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, @@ -271,7 +274,7 @@ where I: BlockImport> + Send + Sync + 'static, Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { AuraWorker { @@ -318,7 +321,7 @@ where P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, SO: SyncOracle + Send + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, Error: std::error::Error + Send + From + 'static, { @@ -389,7 +392,7 @@ where Self::Claim, Self::EpochData, ) -> Result< - sp_consensus::BlockImportParams>, + sc_consensus::BlockImportParams>, sp_consensus::Error> + Send + 'static> { let keystore = self.keystore.clone(); @@ -422,7 +425,7 @@ where import_block.post_digests.push(signature_digest_item); import_block.body = Some(body); import_block.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(storage_changes) + sc_consensus::StorageChanges::Changes(storage_changes) ); import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); @@ -547,8 +550,9 @@ mod tests { use super::*; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, - import_queue::BoxJustificationImport, SlotData, + SlotData, }; + use sc_consensus::BoxJustificationImport; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; use sc_network::config::ProtocolConfig; diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index e76e293df5bb4..e6538cb57aae6 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { 
package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 61b58bf1b5999..25c10862f01ed 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -96,12 +96,16 @@ use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{ Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, }; -use sp_consensus::{import_queue::BoxJustificationImport, CanAuthorWith, ImportResult}; use sp_consensus::{ - import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment, - Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData, - StateAction, + import_queue::CacheKeyId, BlockOrigin, Environment, CanAuthorWith, + Error as ConsensusError, Proposer, SelectChain, SlotData, +}; +use sc_consensus::{ + import_queue::{BoxJustificationImport, BasicQueue, DefaultImportQueue, Verifier}, + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, + ForkChoiceStrategy, ImportResult, StateAction + }, }; use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; @@ -461,7 +465,7 @@ where + Sync + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - L: sp_consensus::JustificationSyncLink + 'static, + L: sc_consensus::JustificationSyncLink + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, @@ -502,7 +506,13 @@ where let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); - let answer_requests = answer_requests(worker_rx, config.0, client, babe_link.epoch_changes.clone()); + let answer_requests = answer_requests( + worker_rx, + config.0, + client, + babe_link.epoch_changes.clone(), + ); + Ok(BabeWorker { inner: Box::pin(future::join(inner, answer_requests).map(|_| ())), slot_notification_sinks, @@ -654,7 +664,7 @@ where E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy>, Error: std::error::Error + Send + From + From + 'static, { @@ -761,7 +771,7 @@ where Self::Claim, Self::EpochData, ) -> Result< - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, sp_consensus::Error> + Send + 'static> { let keystore = self.keystore.clone(); @@ -792,7 +802,7 @@ where import_block.post_digests.push(digest_item); import_block.body = Some(body); import_block.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(storage_changes) + sc_consensus::StorageChanges::Changes(storage_changes) ); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), @@ -890,8 +900,10 @@ fn find_next_epoch_digest(header: &B::Header) trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = 
log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), - (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(epoch)), false) => + epoch_digest = Some(epoch), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } } @@ -909,8 +921,10 @@ fn find_next_config_digest(header: &B::Header) trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { - (Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)), - (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), + (Some(ConsensusLog::NextConfigData(_)), true) => + return Err(babe_err(Error::MultipleConfigChangeDigests)), + (Some(ConsensusLog::NextConfigData(config)), false) => + config_digest = Some(config), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } } @@ -1630,8 +1644,11 @@ pub fn import_queue( can_author_with: CAW, telemetry: Option, ) -> ClientResult> where - Inner: BlockImport> - + Send + Sync + 'static, + Inner: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor + > + Send + Sync + 'static, Client: ProvideRuntimeApi + ProvideCache + HeaderBackend + HeaderMetadata + AuxStore + Send + Sync + 'static, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 3392ffade98ee..e2140b66936fc 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -36,8 +36,8 @@ use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, DisableProofRecording, AlwaysCanAuthor, - import_queue::{BoxBlockImport, BoxJustificationImport}, }; +use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_network_test::{Block as TestBlock, *}; use sc_network::config::ProtocolConfig; use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index c8d86b06115a3..c34e5416f84b2 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,9 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1" +thiserror = "1.0.21" +libp2p = { version = "0.37.1", default-features = false } +log = "0.4.8" +futures = { version = "0.3.1", features = ["thread-pool"] } +futures-timer = "3.0.1" sc-client-api = { version = "4.0.0-dev", path = "../../api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { path = "../../../primitives/core", version = "4.0.0-dev"} +sp-consensus = { path = "../../../primitives/consensus/common", version = "0.10.0-dev"} +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-utils = { version = "4.0.0-dev", path = "../../../primitives/utils" } 
+sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } parking_lot = "0.11.1" +serde = { version = "1.0", features = ["derive"] } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } +wasm-timer = "0.2.5" +async-trait = "0.1.42" + +[dev-dependencies] +sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/primitives/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs similarity index 94% rename from primitives/consensus/common/src/block_import.rs rename to client/consensus/common/src/block_import.rs index a444e15095ef6..50f2648019874 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -25,8 +25,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::any::Any; -use crate::Error; -use crate::import_queue::CacheKeyId; +use sp_consensus::{BlockOrigin, Error}; +use sp_consensus::import_queue::CacheKeyId; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -94,23 +94,6 @@ impl ImportResult { } } -/// Block data origin. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum BlockOrigin { - /// Genesis block built into the client. - Genesis, - /// Block is part of the initial sync with the network. - NetworkInitialSync, - /// Block was broadcasted on the network. - NetworkBroadcast, - /// Block that was received from the network and validated in the consensus process. - ConsensusBroadcast, - /// Block that was collated by this node. - Own, - /// Block was imported from a file. - File, -} - /// Fork choice strategy. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ForkChoiceStrategy { @@ -271,9 +254,11 @@ impl BlockImportParams { /// Auxiliary function for "converting" the transaction type. /// - /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that `Self` now - /// uses a different transaction type. - pub fn clear_storage_changes_and_mutate(self) -> BlockImportParams { + /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc + /// think that `Self` now uses a different transaction type. + pub fn clear_storage_changes_and_mutate(self) + -> BlockImportParams + { // Preserve imported state. let state_action = match self.state_action { StateAction::ApplyChanges(StorageChanges::Import(state)) => @@ -356,7 +341,7 @@ impl BlockImport for crate::import_queue::BoxBlockImp where Transaction: Send + 'static, { - type Error = crate::error::Error; + type Error = sp_consensus::error::Error; type Transaction = Transaction; /// Check block preconditions. diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs new file mode 100644 index 0000000000000..50ddb48120e0b --- /dev/null +++ b/client/consensus/common/src/import_queue.rs @@ -0,0 +1,307 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Import Queue primitive: something which can verify and import blocks. +//! +//! This serves as an intermediate and abstracted step between synchronization +//! and import. Each mode of consensus will have its own requirements for block +//! verification. Some algorithms can verify in parallel, while others only +//! sequentially. +//! +//! The `ImportQueue` trait allows such verification strategies to be +//! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial +//! queues to be instantiated simply. + +use std::collections::HashMap; + +use sp_runtime::{Justifications, traits::{Block as BlockT, Header as _, NumberFor}}; + +use sp_consensus::{BlockOrigin, error::Error as ConsensusError, import_queue::CacheKeyId}; +use crate::{ + block_import::{ + BlockImport, BlockImportParams, ImportedAux, JustificationImport, ImportResult, + BlockCheckParams, ImportedState, StateAction, + }, + metrics::Metrics, +}; +pub use basic_queue::BasicQueue; + +/// A commonly-used Import Queue type. +/// +/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. +pub type DefaultImportQueue = + BasicQueue>; + +mod basic_queue; +pub mod buffered_link; + +/// Shared block import struct used by the queue. +pub type BoxBlockImport = Box< + dyn BlockImport + Send + Sync +>; + +/// Shared justification import struct used by the queue. +pub type BoxJustificationImport = Box< + dyn JustificationImport + Send + Sync +>; + +/// Maps to the Origin used by the network. +pub type Origin = libp2p::PeerId; + +/// Block data used by the queue. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct IncomingBlock { + /// Block header hash. + pub hash: ::Hash, + /// Block header if requested. + pub header: Option<::Header>, + /// Block body if requested. + pub body: Option::Extrinsic>>, + /// Indexed block body if requested. + pub indexed_body: Option>>, + /// Justification(s) if requested. + pub justifications: Option, + /// The peer, we received this from + pub origin: Option, + /// Allow importing the block skipping state verification if parent state is missing. + pub allow_missing_state: bool, + /// Skip block execution and state verification. + pub skip_execution: bool, + /// Re-validate existing block. + pub import_existing: bool, + /// Do not compute new state, but rather set it to the given set. + pub state: Option>, +} + +/// Verify a justification of a block +#[async_trait::async_trait] +pub trait Verifier: Send + Sync { + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. If not, err with an Error-Message + /// presented to the User in the logs. + async fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justifications: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String>; +} + +/// Blocks import queue API. +/// +/// The `import_*` methods can be called in order to send elements for the import queue to verify. +/// Afterwards, call `poll_actions` to determine how to respond to these elements. +pub trait ImportQueue: Send { + /// Import bunch of blocks. + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); + /// Import block justifications. + fn import_justifications( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justifications: Justifications + ); + /// Polls for actions to perform on the network. 
+ /// + /// This method should behave in a way similar to `Future::poll`. It can register the current + /// task and notify later when more actions are ready to be polled. To continue the comparison, + /// it is as if this method always returned `Poll::Pending`. + fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); +} + +/// Hooks that the verification queue can use to influence the synchronization +/// algorithm. +pub trait Link: Send { + /// Batch of blocks imported, with or without error. + fn blocks_processed( + &mut self, + _imported: usize, + _count: usize, + _results: Vec<(BlockImportResult, B::Hash)> + ) {} + + /// Justification import result. + fn justification_imported( + &mut self, + _who: Origin, + _hash: &B::Hash, + _number: NumberFor, + _success: bool) {} + + /// Request a justification for the given block. + fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} +} + +/// Block import successful result. +#[derive(Debug, PartialEq)] +pub enum BlockImportStatus { + /// Imported known block. + ImportedKnown(N, Option), + /// Imported unknown block. + ImportedUnknown(N, ImportedAux, Option), +} + +/// Block import error. +#[derive(Debug)] +pub enum BlockImportError { + /// Block missed header, can't be imported + IncompleteHeader(Option), + /// Block verification failed, can't be imported + VerificationFailed(Option, String), + /// Block is known to be Bad + BadBlock(Option), + /// Parent state is missing. + MissingState, + /// Block has an unknown parent + UnknownParent, + /// Block import has been cancelled. This can happen if the parent block fails to be imported. + Cancelled, + /// Other error. + Other(ConsensusError), +} + +type BlockImportResult = Result>, BlockImportError>; + +/// Single block import function. +pub async fn import_single_block, Transaction: Send + 'static>( + import_handle: &mut impl BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, +) -> BlockImportResult { + import_single_block_metered(import_handle, block_origin, block, verifier, None).await +} + +/// Single block import function with metering. 
+pub(crate) async fn import_single_block_metered< + B: BlockT, + V: Verifier, + Transaction: Send + 'static> +( + import_handle: &mut impl BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, + metrics: Option, +) -> BlockImportResult { + let peer = block.origin; + + let (header, justifications) = match (block.header, block.justifications) { + (Some(header), justifications) => (header, justifications), + (None, _) => { + if let Some(ref peer) = peer { + debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); + } else { + debug!(target: "sync", "Header {} was not provided ", block.hash); + } + return Err(BlockImportError::IncompleteHeader(peer)) + }, + }; + + trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + + let number = header.number().clone(); + let hash = header.hash(); + let parent_hash = header.parent_hash().clone(); + + let import_handler = |import| { + match import { + Ok(ImportResult::AlreadyInChain) => { + trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); + Ok(BlockImportStatus::ImportedKnown(number, peer.clone())) + }, + Ok(ImportResult::Imported(aux)) => { + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer.clone())) + }, + Ok(ImportResult::MissingState) => { + debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", + number, hash, parent_hash); + Err(BlockImportError::MissingState) + }, + Ok(ImportResult::UnknownParent) => { + debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", + number, hash, parent_hash); + Err(BlockImportError::UnknownParent) + }, + Ok(ImportResult::KnownBad) => { + debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); + Err(BlockImportError::BadBlock(peer.clone())) + }, + Err(e) => { + debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); + Err(BlockImportError::Other(e)) + } + } + }; + + match import_handler(import_handle.check_block(BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state: block.allow_missing_state, + import_existing: block.import_existing, + }).await)? { + BlockImportStatus::ImportedUnknown { .. } => (), + r => return Ok(r), // Any other successful result means that the block is already imported. 
+ } + + let started = wasm_timer::Instant::now(); + let (mut import_block, maybe_keys) = verifier.verify( + block_origin, + header, + justifications, + block.body + ).await.map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; + + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(true, started.elapsed()); + } + + let mut cache = HashMap::new(); + if let Some(keys) = maybe_keys { + cache.extend(keys.into_iter()); + } + import_block.import_existing = block.import_existing; + import_block.indexed_body = block.indexed_body; + let mut import_block = import_block.clear_storage_changes_and_mutate(); + if let Some(state) = block.state { + let changes = crate::block_import::StorageChanges::Import(state); + import_block.state_action = StateAction::ApplyChanges(changes); + } else if block.skip_execution { + import_block.state_action = StateAction::Skip; + } else if block.allow_missing_state { + import_block.state_action = StateAction::ExecuteIfPossible; + } + + let imported = import_handle.import_block(import_block, cache).await; + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification_and_import(started.elapsed()); + } + import_handler(imported) +} diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs similarity index 94% rename from primitives/consensus/common/src/import_queue/basic_queue.rs rename to client/consensus/common/src/import_queue/basic_queue.rs index 8dd40d84df305..7e963f23b9f5d 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -18,14 +18,17 @@ use std::{pin::Pin, time::Duration, marker::PhantomData}; use futures::{prelude::*, task::Context, task::Poll}; use futures_timer::Delay; -use sp_runtime::{Justification, Justifications, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; +use sp_runtime::{ + Justification, Justifications, + traits::{Block as BlockT, Header as HeaderT, NumberFor} +}; use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded, TracingUnboundedReceiver}; use prometheus_endpoint::Registry; +use sp_consensus::BlockOrigin; use crate::{ - block_import::BlockOrigin, import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, + BlockImportStatus, BlockImportError, Verifier, BoxBlockImport, BoxJustificationImport, ImportQueue, Link, Origin, IncomingBlock, import_single_block_metered, buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver}, @@ -145,14 +148,18 @@ mod worker_messages { use super::*; pub struct ImportBlocks(pub BlockOrigin, pub Vec>); - pub struct ImportJustification(pub Origin, pub B::Hash, pub NumberFor, pub Justification); + pub struct ImportJustification( + pub Origin, + pub B::Hash, + pub NumberFor, + pub Justification); } /// The process of importing blocks. /// -/// This polls the `block_import_receiver` for new blocks to import and than awaits on importing these blocks. -/// After each block is imported, this async function yields once to give other futures the possibility -/// to be run. 
+/// This polls the `block_import_receiver` for new blocks to import and than awaits on +/// importing these blocks. After each block is imported, this async function yields once +/// to give other futures the possibility to be run. /// /// Returns when `block_import` ended. async fn block_import_process( @@ -327,12 +334,13 @@ struct ImportManyBlocksResult { /// The total number of blocks processed. block_count: usize, /// The import results for each block. - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, } /// Import several blocks at once, returning import result for each block. /// -/// This will yield after each imported block once, to ensure that other futures can be called as well. +/// This will yield after each imported block once, to ensure that other futures can +/// be called as well. async fn import_many_blocks, Transaction: Send + 'static>( import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, @@ -411,11 +419,11 @@ async fn import_many_blocks, Transaction: Send + 'stat } } -/// A future that will always `yield` on the first call of `poll` but schedules the current task for -/// re-execution. +/// A future that will always `yield` on the first call of `poll` but schedules the +/// current task for re-execution. /// -/// This is done by getting the waker and calling `wake_by_ref` followed by returning `Pending`. -/// The next time the `poll` is called, it will return `Ready`. +/// This is done by getting the waker and calling `wake_by_ref` followed by returning +/// `Pending`. The next time the `poll` is called, it will return `Ready`. struct Yield(bool); impl Yield { @@ -443,7 +451,9 @@ mod tests { use super::*; use crate::{ import_queue::{CacheKeyId, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, + }; + use crate::block_import::{ + BlockImportParams, ImportResult, BlockCheckParams, BlockImport, JustificationImport }; use futures::{executor::block_on, Future}; use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; @@ -464,7 +474,7 @@ mod tests { #[async_trait::async_trait] impl BlockImport for () { - type Error = crate::Error; + type Error = sp_consensus::Error; type Transaction = Extrinsic; async fn check_block( @@ -485,7 +495,7 @@ mod tests { #[async_trait::async_trait] impl JustificationImport for () { - type Error = crate::Error; + type Error = sp_consensus::Error; async fn on_start(&mut self) -> Vec<(Hash, BlockNumber)> { Vec::new() @@ -517,7 +527,7 @@ mod tests { &mut self, _imported: usize, _count: usize, - results: Vec<(Result, BlockImportError>, Hash)>, + results: Vec<(Result, BlockImportError>, Hash)>, ) { if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) { self.events.push(Event::BlockImported(hash)); diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs similarity index 92% rename from primitives/consensus/common/src/import_queue/buffered_link.rs rename to client/consensus/common/src/import_queue/buffered_link.rs index 0295f704c4efc..a04e774ab5472 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -22,8 +22,8 @@ //! # Example //! //! ``` -//! use sp_consensus::import_queue::Link; -//! # use sp_consensus::import_queue::buffered_link::buffered_link; +//! use sc_consensus::import_queue::Link; +//! 
# use sc_consensus::import_queue::buffered_link::buffered_link; //! # use sp_test_primitives::Block; //! # struct DummyLink; impl Link for DummyLink {} //! # let mut my_link = DummyLink; @@ -42,7 +42,9 @@ use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; use std::{pin::Pin, task::Context, task::Poll}; -use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; +use crate::import_queue::{Origin, Link}; + +use super::BlockImportResult; /// Wraps around an unbounded channel from the `futures` crate. The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer @@ -78,7 +80,11 @@ impl Clone for BufferedLinkSender { /// Internal buffered message. enum BlockImportWorkerMsg { - BlocksProcessed(usize, usize, Vec<(Result>, BlockImportError>, B::Hash)>), + BlocksProcessed( + usize, + usize, + Vec<(BlockImportResult, B::Hash)> + ), JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), } @@ -88,7 +94,7 @@ impl Link for BufferedLinkSender { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(BlockImportResult, B::Hash)> ) { let _ = self.tx.unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results)); } diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 9b4d705769196..9262fba508a40 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -18,6 +18,22 @@ //! Collection of common consensus specific implementations +#[macro_use] extern crate log; + +pub mod import_queue; +pub mod block_import; +pub mod metrics; + +pub use import_queue::{ + BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, import_single_block, + BlockImportError, BlockImportStatus, IncomingBlock, ImportQueue, Link, DefaultImportQueue +}; +pub use block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, + ImportResult, ImportedAux, ImportedState, JustificationImport, + JustificationSyncLink, StateAction, StorageChanges +}; + mod longest_chain; pub mod shared_data; diff --git a/primitives/consensus/common/src/metrics.rs b/client/consensus/common/src/metrics.rs similarity index 96% rename from primitives/consensus/common/src/metrics.rs rename to client/consensus/common/src/metrics.rs index 29d39436cbefc..884d89c1266cf 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/client/consensus/common/src/metrics.rs @@ -23,7 +23,7 @@ use prometheus_endpoint::{ use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::import_queue::{BlockImportResult, BlockImportError}; +use crate::import_queue::{BlockImportStatus, BlockImportError}; /// Generic Prometheus metrics for common consensus functionality. 
#[derive(Clone)] @@ -77,7 +77,7 @@ impl Metrics { pub fn report_import( &self, - result: &Result>, BlockImportError>, + result: &Result>, BlockImportError>, ) { let label = match result { Ok(_) => "success", diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 8a236b0591b86..a0de596b005b7 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -26,6 +26,7 @@ assert_matches = "1.3.0" async-trait = "0.1.50" sc-client-api = { path = "../../api", version = "4.0.0-dev"} +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-consensus-babe = { path = "../../consensus/babe", version = "0.10.0-dev"} sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.10.0-dev"} sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.10.0-dev"} diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index 0cfd99cab5c99..93828e2dced08 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -21,7 +21,7 @@ use super::Error; use sp_runtime::traits::{Block as BlockT, DigestFor}; use sp_inherents::InherentData; -use sp_consensus::BlockImportParams; +use sc_consensus::BlockImportParams; pub mod babe; diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index fb2d47b48fed1..6b574bcc7e1af 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -32,7 +32,8 @@ use sp_keystore::SyncCryptoStorePtr; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockImportParams, BlockOrigin, ForkChoiceStrategy}; +use sp_consensus::BlockOrigin; +use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_consensus_slots::Slot; use sp_consensus_babe::{ BabeApi, inherents::BabeInherentData, ConsensusLog, BABE_ENGINE_ID, AuthorityId, @@ -44,7 +45,7 @@ use sp_runtime::{ generic::{Digest, BlockId}, Justifications, }; use sp_timestamp::{InherentType, INHERENT_IDENTIFIER, TimestampInherentData}; -use sp_consensus::import_queue::{Verifier, CacheKeyId}; +use sp_consensus::import_queue::CacheKeyId; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index 77140c835a3ee..25af739db8126 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -19,7 +19,8 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. 
-use sp_consensus::{Error as ConsensusError, ImportResult}; +use sp_consensus::{Error as ConsensusError}; +use sc_consensus::ImportResult; use sp_blockchain::Error as BlockchainError; use sp_inherents::Error as InherentsError; use futures::channel::{oneshot, mpsc::SendError}; diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 5d93f6724ee9f..1e44f5e55b767 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -21,9 +21,12 @@ use futures::prelude::*; use sp_consensus::{ - Environment, Proposer, SelectChain, BlockImport, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - import_queue::{Verifier, BasicQueue, CacheKeyId, BoxBlockImport}, + BlockOrigin, Environment, Proposer, SelectChain, + import_queue::CacheKeyId, +}; +use sc_consensus::{ + block_import::{BlockImport, ForkChoiceStrategy, BlockImportParams}, + import_queue::{Verifier, BasicQueue, BoxBlockImport}, }; use sp_blockchain::HeaderBackend; use sp_inherents::CreateInherentDataProviders; @@ -278,7 +281,7 @@ mod tests { use substrate_test_runtime_transaction_pool::{TestApi, uxt}; use sc_transaction_pool_api::{TransactionPool, MaintainedTransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; - use sp_consensus::ImportedAux; + use sc_consensus::ImportedAux; use sc_basic_authorship::ProposerFactory; use sc_client_api::BlockBackend; diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index eb056f22fed8b..349d2c3372d85 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -18,7 +18,7 @@ //! RPC interface for the `ManualSeal` Engine. -use sp_consensus::ImportedAux; +use sc_consensus::ImportedAux; use jsonrpc_core::Error; use jsonrpc_derive::rpc; use futures::{ diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 450a7bff4cd40..c888a13e73452 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -25,9 +25,9 @@ use sp_runtime::{ generic::BlockId, }; use futures::prelude::*; -use sp_consensus::{ - self, BlockImport, Environment, Proposer, ForkChoiceStrategy, - BlockImportParams, BlockOrigin, ImportResult, SelectChain, StateAction, +use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; +use sc_consensus::{ + BlockImportParams, BlockImport, ImportResult, StateAction, ForkChoiceStrategy }; use sp_blockchain::HeaderBackend; use std::collections::HashMap; @@ -146,7 +146,7 @@ pub async fn seal_block( params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); params.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(proposal.storage_changes) + sc_consensus::StorageChanges::Changes(proposal.storage_changes) ); if let Some(digest_provider) = digest_provider { diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 77ed9ba04ce95..368005fafb136 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -23,6 +23,7 @@ sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-bu sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-consensus-pow = { version = "0.10.0-dev", path = "../../../primitives/consensus/pow" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", 
path = "../../../client/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } futures-timer = "3.0.1" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index e71726564ebe5..8b9b43a1c3a3e 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -59,10 +59,11 @@ use sp_api::ProvideRuntimeApi; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_consensus::{ - BlockImportParams, BlockOrigin, ForkChoiceStrategy, SyncOracle, Environment, Proposer, - SelectChain, Error as ConsensusError, CanAuthorWith, BlockImport, BlockCheckParams, ImportResult, + BlockOrigin, SyncOracle, Environment, Proposer, + SelectChain, Error as ConsensusError, CanAuthorWith }; -use sp_consensus::import_queue::{ +use sc_consensus::{ + BlockImportParams, ForkChoiceStrategy, BlockImport, BlockCheckParams, ImportResult, BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, }; use codec::{Encode, Decode}; @@ -557,7 +558,7 @@ where E::Error: std::fmt::Debug, E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, CAW: CanAuthorWith + Clone + Send + 'static, { diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 74fbcce81341d..75cfa7cadd9f0 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -18,8 +18,8 @@ use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; use sc_client_api::ImportNotifications; -use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, StorageChanges, - StateAction, import_queue::BoxBlockImport}; +use sp_consensus::{BlockOrigin, Proposal}; +use sc_consensus::{BlockImportParams, StorageChanges, StateAction, BoxBlockImport}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, @@ -62,7 +62,7 @@ pub struct MiningWorker< Block: BlockT, Algorithm: PowAlgorithm, C: sp_api::ProvideRuntimeApi, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, Proof, > { pub(crate) build: Option>, @@ -77,7 +77,7 @@ where C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static + Send, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. 
`None` if the worker has just started or the client is doing diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 22697e94d358d..4e027ccab7722 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -21,6 +21,7 @@ sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } sp-arithmetic = { version = "4.0.0-dev", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index c410f173e90ab..8a8e1c5471ef1 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -40,8 +40,9 @@ use log::{debug, error, info, warn}; use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{ - BlockImport, CanAuthorWith, JustificationSyncLink, Proposer, SelectChain, SlotData, SyncOracle, + CanAuthorWith, Proposer, SelectChain, SlotData, SyncOracle, }; +use sc_consensus::{BlockImport, JustificationSyncLink}; use sp_consensus_slots::Slot; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ @@ -163,7 +164,7 @@ pub trait SimpleSlotWorker { Self::Claim, Self::EpochData, ) -> Result< - sp_consensus::BlockImportParams>::Transaction>, + sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error > + Send + 'static >; diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index 43a7cc0565cde..62fe596083336 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -32,5 +32,6 @@ finality-grandpa = { version = "0.14.1" } rand = "0.8" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index ebb26a28c3485..cb069eb7a988d 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -27,8 +27,11 @@ use sc_telemetry::TelemetryHandle; use sp_api::TransactionFor; use sp_blockchain::{well_known_cache_keys, BlockStatus}; use sp_consensus::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, - ImportResult, JustificationImport, SelectChain, + BlockOrigin, Error as ConsensusError, SelectChain, +}; +use sc_consensus::{ + BlockCheckParams, BlockImport, BlockImportParams, + ImportResult, JustificationImport }; use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 
6c3f0f6af37a8..99ac43b3336e2 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -72,7 +72,8 @@ use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; -use sp_consensus::{SelectChain, BlockImport}; +use sp_consensus::SelectChain; +use sc_consensus::BlockImport; use sp_core::{ crypto::Public, }; diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 725beec6a94b2..80d5c5ff37d3e 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -34,9 +34,10 @@ use sp_keyring::Ed25519Keyring; use sp_blockchain::Result; use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; -use sp_consensus::{ - BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::BoxJustificationImport, +use sp_consensus::BlockOrigin; +use sc_consensus::{ + ForkChoiceStrategy, ImportedAux, BlockImportParams, + ImportResult, BlockImport, BoxJustificationImport }; use std::{collections::{HashMap, HashSet}, pin::Pin}; use sp_runtime::{Justifications, traits::{Block as BlockT, Header as HeaderT}}; @@ -243,7 +244,11 @@ fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile:: (keystore, keystore_path) } -fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { +fn block_until_complete( + future: impl Future + Unpin, + net: &Arc>, + runtime: &mut Runtime) +{ let drive_to_completion = futures::future::poll_fn(|cx| { net.lock().poll(cx); Poll::<()>::Pending }); @@ -547,7 +552,8 @@ fn transition_3_voters_twice_1_full_observer() { assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -616,8 +622,10 @@ fn transition_3_voters_twice_1_full_observer() { .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |_| future::ready(())) .map(move |()| { - let full_client = client.as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let full_client = client.as_full() + .expect("only full clients are used in test"); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -815,8 +823,10 @@ fn force_change_to_new_set() { assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); - let full_client = peer.client().as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let full_client = peer.client().as_full() + .expect("only full clients are used in test"); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (1, voters.as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -1144,7 +1154,8 @@ fn voter_persists_its_votes() { // by `Sink::poll_complete` to make sure items are being flushed. 
Given that // we send in a loop including a delay until items are received, this can be // ignored for the sake of reduced complexity. - Pin::new(&mut *round_tx.lock()).start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); + Pin::new(&mut *round_tx.lock()) + .start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 1 { // the next message we receive should be our own prevote let prevote = match signed.message { @@ -1231,7 +1242,9 @@ fn voter_catches_up_to_latest_round_when_behind() { let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); - let voter = |keystore, peer_id, link, net: Arc>| -> Pin + Send>> { + let voter = |keystore, peer_id, link, net: Arc>| + -> Pin + Send>> + { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 7ca98150f9dd8..9c6b580fb9c66 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -53,6 +53,7 @@ smallvec = "1.5.0" sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-utils = { version = "4.0.0-dev", path = "../../primitives/utils" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 576c49d1da366..b79dee4410cd8 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -36,7 +36,8 @@ use libp2p::swarm::{ }; use log::debug; use prost::Message; -use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; +use sp_consensus::BlockOrigin; +use sc_consensus::import_queue::{IncomingBlock, Origin}; use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justifications}; use std::{ borrow::Cow, diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 32d4cc9ff024f..cf235678fd85a 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -21,7 +21,8 @@ use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; -pub use sc_client_api::{StorageKey, StorageData, ImportedState}; +pub use sc_client_api::{StorageKey, StorageData}; +pub use sc_consensus::ImportedState; /// Local client abstraction for the network. 
pub trait Client: HeaderBackend + ProofProvider + BlockIdTo diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 8cc467a7fb9fd..cb16f31495eba 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -44,7 +44,8 @@ use libp2p::{ multiaddr, wasm_ext, Multiaddr, PeerId, }; use prometheus_endpoint::Registry; -use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; +use sp_consensus::{block_validation::BlockAnnounceValidator}; +use sc_consensus::ImportQueue; use sp_runtime::traits::Block as BlockT; use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; use std::{ diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index bdef28f9bebe5..f4f96b863d624 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -50,7 +50,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) struct PassThroughVerifier(bool); #[async_trait::async_trait] - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, origin: sp_consensus::BlockOrigin, @@ -59,7 +59,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) body: Option>, ) -> Result< ( - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, Option)>>, ), String, @@ -79,16 +79,16 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) )] }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); + let mut import = sc_consensus::BlockImportParams::new(origin, header); import.body = body; import.finalized = self.0; import.justifications = justifications; - import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + import.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + let import_queue = Box::new(sc_consensus::BasicQueue::new( PassThroughVerifier(false), Box::new(client.clone()), None, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index eaed7ffcccace..6b6f63c06d612 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -40,10 +40,9 @@ use message::generic::{Message as GenericMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, GaugeVec, PrometheusError, Opts, register, U64}; use prost::Message as _; use sp_consensus::{ - BlockOrigin, - block_validation::BlockAnnounceValidator, - import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} + BlockOrigin, block_validation::BlockAnnounceValidator, }; +use sc_consensus::import_queue::{BlockImportStatus, BlockImportError, IncomingBlock, Origin}; use sp_runtime::{ Justifications, generic::BlockId, @@ -1040,7 +1039,7 @@ impl Protocol { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)> ) { let results = self.sync.on_blocks_processed( imported, diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 55b64c157c65e..d07ca57502eba 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -35,8 +35,8 @@ use state::StateSync; use sp_blockchain::{Error as ClientError, HeaderMetadata}; use sp_consensus::{BlockOrigin, BlockStatus, block_validation::{BlockAnnounceValidator, Validation}, - 
import_queue::{IncomingBlock, BlockImportResult, BlockImportError} }; +use sc_consensus::{IncomingBlock, BlockImportStatus, BlockImportError}; use crate::protocol::message::{ self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, }; @@ -1218,7 +1218,7 @@ impl ChainSync { &'a mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) -> impl Iterator), BadPeer>> + 'a { trace!(target: "sync", "Imported {} of {}", imported, count); @@ -1238,12 +1238,12 @@ impl ChainSync { } match result { - Ok(BlockImportResult::ImportedKnown(number, who)) => { + Ok(BlockImportStatus::ImportedKnown(number, who)) => { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } } - Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( target: "sync", @@ -2756,7 +2756,7 @@ mod test { .map(|b| ( Ok( - BlockImportResult::ImportedUnknown( + BlockImportStatus::ImportedUnknown( b.header().number().clone(), Default::default(), Some(peer_id1.clone()), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index fb303312093cd..d2b555d9c8033 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -83,7 +83,7 @@ use log::{error, info, trace, debug, warn}; use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; -use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; +use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ @@ -1230,7 +1230,7 @@ impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle } } -impl sp_consensus::JustificationSyncLink for NetworkService { +impl sc_consensus::JustificationSyncLink for NetworkService { fn request_justification(&self, hash: &B::Hash, number: NumberFor) { NetworkService::request_justification(self, hash, number); } @@ -1927,7 +1927,7 @@ impl<'a, B: BlockT> Link for NetworkLink<'a, B> { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)> ) { self.protocol.behaviour_mut().user_protocol_mut().on_blocks_processed(imported, count, results) } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 4a739e50628a5..3836dc0ed8e44 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -50,7 +50,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) struct PassThroughVerifier(bool); #[async_trait::async_trait] - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, origin: sp_consensus::BlockOrigin, @@ -59,7 +59,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) body: Option>, ) -> Result< ( - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, Option)>>, ), String, @@ -79,16 +79,16 @@ fn build_test_full_node(config: config::NetworkConfiguration) )] }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); + let mut import = sc_consensus::BlockImportParams::new(origin, header); 
import.body = body; import.finalized = self.0; import.justifications = justifications; - import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + import.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + let import_queue = Box::new(sc_consensus::BasicQueue::new( PassThroughVerifier(false), Box::new(client.clone()), None, diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 6d3ceb4a933d8..599d3cbffbe66 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -18,9 +18,8 @@ //! Testing block import logic. -use sp_consensus::ImportedAux; -use sp_consensus::import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, +use sc_consensus::{ + ImportedAux, import_single_block, BasicQueue, BlockImportError, BlockImportStatus, IncomingBlock, }; use substrate_test_runtime_client::{self, prelude::*}; use substrate_test_runtime_client::runtime::{Block, Hash}; @@ -65,7 +64,7 @@ fn import_single_good_block_works() { block, &mut PassThroughVerifier::new(true) )) { - Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) + Ok(BlockImportStatus::ImportedUnknown(ref num, ref aux, ref org)) if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} r @ _ => panic!("{:?}", r) } @@ -80,7 +79,7 @@ fn import_single_good_known_block_is_ignored() { block, &mut PassThroughVerifier::new(true) )) { - Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {} + Ok(BlockImportStatus::ImportedKnown(ref n, _)) if *n == number => {} _ => panic!() } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 900e05e26a78f..7529df3ee0c38 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -37,19 +37,22 @@ use sp_blockchain::{ Info as BlockchainInfo, }; use sc_client_api::{ - BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, + BlockchainEvents, BlockImportNotification, FinalityNotifications, + ImportNotifications, FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend, }; use sc_consensus::LongestChain; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_network::config::Role; use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; -use sp_consensus::import_queue::{ +use sc_consensus::{ BasicQueue, BoxJustificationImport, Verifier, }; -use sp_consensus::block_import::{BlockImport, ImportResult}; -use sp_consensus::Error as ConsensusError; -use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; +use sp_consensus::{BlockOrigin, Error as ConsensusError}; +use sc_consensus::{ + BlockImport, ImportResult, ForkChoiceStrategy, BlockImportParams, + BlockCheckParams, JustificationImport +}; use futures::prelude::*; use futures::future::BoxFuture; use sc_network::{ @@ -150,7 +153,7 @@ pub enum PeersClient { impl PeersClient { pub fn as_full(&self) -> Option> { match *self { - PeersClient::Full(ref client, ref _backend) => Some(client.clone()), + PeersClient::Full(ref client, _) => Some(client.clone()), _ => None, } } @@ -161,22 +164,22 @@ impl PeersClient { pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { match *self { - PeersClient::Full(ref client, ref _backend) => 
client.get_aux(key), - PeersClient::Light(ref client, ref _backend) => client.get_aux(key), + PeersClient::Full(ref client, _) => client.get_aux(key), + PeersClient::Light(ref client, _) => client.get_aux(key), } } pub fn info(&self) -> BlockchainInfo { match *self { - PeersClient::Full(ref client, ref _backend) => client.chain_info(), - PeersClient::Light(ref client, ref _backend) => client.chain_info(), + PeersClient::Full(ref client, _) => client.chain_info(), + PeersClient::Light(ref client, _) => client.chain_info(), } } pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { match *self { - PeersClient::Full(ref client, ref _backend) => client.header(block), - PeersClient::Light(ref client, ref _backend) => client.header(block), + PeersClient::Full(ref client, _) => client.header(block), + PeersClient::Light(ref client, _) => client.header(block), } } @@ -195,22 +198,22 @@ impl PeersClient { pub fn justifications(&self, block: &BlockId) -> ClientResult> { match *self { - PeersClient::Full(ref client, ref _backend) => client.justifications(block), - PeersClient::Light(ref client, ref _backend) => client.justifications(block), + PeersClient::Full(ref client, _) => client.justifications(block), + PeersClient::Light(ref client, _) => client.justifications(block), } } pub fn finality_notification_stream(&self) -> FinalityNotifications { match *self { - PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), + PeersClient::Full(ref client, _) => client.finality_notification_stream(), + PeersClient::Light(ref client, _) => client.finality_notification_stream(), } } pub fn import_notification_stream(&self) -> ImportNotifications{ match *self { - PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), + PeersClient::Full(ref client, _) => client.import_notification_stream(), + PeersClient::Light(ref client, _) => client.import_notification_stream(), } } @@ -221,8 +224,8 @@ impl PeersClient { notify: bool ) -> ClientResult<()> { match *self { - PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), + PeersClient::Full(ref client, _) => client.finalize_block(id, justification, notify), + PeersClient::Light(ref client, _) => client.finalize_block(id, justification, notify), } } } @@ -288,7 +291,8 @@ impl Peer where } // Returns a clone of the local SelectChain, only available on full nodes - pub fn select_chain(&self) -> Option> { + pub fn select_chain(&self) + -> Option> { self.select_chain.clone() } @@ -352,7 +356,13 @@ impl Peer where headers_only: bool, inform_sync_about_new_best_block: bool, announce_block: bool, - ) -> H256 where F: FnMut(BlockBuilder) -> Block { + ) -> H256 + where + F: FnMut( + BlockBuilder + ) -> Block + { let full_client = self.client.as_full() .expect("blocks could only be generated by full clients"); let mut at = full_client.header(&at).unwrap().unwrap().hash(); @@ -618,10 +628,12 @@ impl Verifier for VerifierAdapter { body: Option> ) -> Result<(BlockImportParams, Option)>>), String> { let hash = header.hash(); - self.verifier.lock().await.verify(origin, header, justifications, body).await.map_err(|e| { - self.failed_verifications.lock().insert(hash, e.clone()); - e - }) + 
self.verifier.lock().await + .verify(origin, header, justifications, body).await + .map_err(|e| { + self.failed_verifications.lock().insert(hash, e.clone()); + e + }) } } @@ -664,7 +676,8 @@ pub struct FullPeerConfig { pub storage_chain: bool, } -pub trait TestNetFactory: Sized where >::Transaction: Send { +pub trait TestNetFactory: Sized where >::Transaction: Send +{ type Verifier: 'static + Verifier; type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; @@ -1038,13 +1051,17 @@ pub trait TestNetFactory: Sized where >: trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id()); // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { + while let Poll::Ready(Some(notification)) = + peer.imported_blocks_stream.as_mut().poll_next(cx) + { peer.network.service().announce_block(notification.hash, None); } // We poll `finality_notification_stream`, but we only take the last event. let mut last = None; - while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { + while let Poll::Ready(Some(item)) = + peer.finality_notification_stream.as_mut().poll_next(cx) + { last = Some(item); } if let Some(notification) = last { @@ -1144,7 +1161,9 @@ impl TestNetFactory for JustificationTestNet { JustificationTestNet(TestNet::from_config(config)) } - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { + fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) + -> Self::Verifier + { self.0.make_verifier(client, config, peer_data) } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index f998c9ebde757..1c0c5835679ab 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1162,4 +1162,3 @@ fn syncs_indexed_blocks() { net.block_until_sync(); assert!(net.peer(1).client().as_full().unwrap().indexed_transaction(&indexed_key).unwrap().is_some()); } - diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 977df259f5c41..e6f17f97ef285 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -44,6 +44,7 @@ sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } tokio = "0.2" lazy_static = "1.4.0" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 67e78c8de8de9..04eb8b8b3f78e 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -53,6 +53,7 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/ru tokio = "0.1.22" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sc-cli = { version = "0.10.0-dev", path = "../cli" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index bb673d65ea0f2..dee772cc6017f 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -20,9 +20,9 @@ use super::*; use 
assert_matches::assert_matches; use substrate_test_runtime_client::{ prelude::*, - sp_consensus::BlockOrigin, runtime::{H256, Block, Header}, }; +use sp_consensus::BlockOrigin; use sp_rpc::list::ListOrValue; use sc_block_builder::BlockBuilderProvider; use futures::{executor, compat::{Future01CompatExt, Stream01CompatExt}}; diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index c9cb0bde89c1a..2b105817d1e21 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -29,9 +29,9 @@ use sc_block_builder::BlockBuilderProvider; use sp_io::hashing::blake2_256; use substrate_test_runtime_client::{ prelude::*, - sp_consensus::BlockOrigin, runtime, }; +use sp_consensus::BlockOrigin; use sc_rpc_api::DenyUnsafe; use sp_runtime::generic::BlockId; use crate::testing::TaskExecutor; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 65393647f3ea4..17aa415363887 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -54,6 +54,7 @@ sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 46590ce8e8c6c..5ab4fa45517e9 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -30,8 +30,8 @@ use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sc_chain_spec::get_extension; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, - import_queue::ImportQueue, }; +use sc_consensus::ImportQueue; use jsonrpc_pubsub::manager::SubscriptionManager; use futures::{ FutureExt, StreamExt, diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index 94f6d25c9eb8f..fa3bed4263399 100644 --- a/client/service/src/chain_ops/check_block.rs +++ b/client/service/src/chain_ops/check_block.rs @@ -21,7 +21,7 @@ use futures::{future, prelude::*}; use sp_runtime::traits::Block as BlockT; use sp_runtime::generic::BlockId; use codec::Encode; -use sp_consensus::import_queue::ImportQueue; +use sc_consensus::ImportQueue; use sc_client_api::{BlockBackend, UsageProvider}; use std::pin::Pin; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index 75ea6670f3525..4f93700831893 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -26,11 +26,10 @@ use sp_runtime::traits::{ }; use sp_runtime::generic::SignedBlock; use codec::{Decode, IoReader as CodecIoReader}; -use sp_consensus::{ - BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, +use sp_consensus::BlockOrigin; +use sc_consensus::import_queue::{ + IncomingBlock, Link, BlockImportError, BlockImportStatus, ImportQueue }; - use std::{io::{Read, Seek}, pin::Pin}; use std::time::{Duration, Instant}; use futures_timer::Delay; @@ -318,7 +317,7 @@ where &mut self, 
imported: usize, _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)> ) { self.imported_blocks += imported as u64; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 8e808a3d824b8..79e5cc6bdddee 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -56,8 +56,11 @@ use sp_state_machine::{ }; use sc_executor::RuntimeVersion; use sp_consensus::{ - Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, - ImportResult, BlockOrigin, ForkChoiceStrategy, StateAction, + BlockOrigin, Error as ConsensusError, BlockStatus +}; +use sc_consensus::{ + BlockImportParams, BlockCheckParams, + ImportResult, ForkChoiceStrategy, StateAction, }; use sp_blockchain::{ self as blockchain, @@ -124,17 +127,18 @@ pub struct Client where Block: BlockT { _phantom: PhantomData, } -// used in importing a block, where additional changes are made after the runtime -// executed. +/// Used in importing a block, where additional changes are made after the runtime +/// executed. enum PrePostHeader { - // they are the same: no post-runtime digest items. + /// they are the same: no post-runtime digest items. Same(H), - // different headers (pre, post). + /// different headers (pre, post). Different(H, H), } impl PrePostHeader { - // get a reference to the "post-header" -- the header as it should be after all changes are applied. + /// get a reference to the "post-header" -- the header as it should be + /// after all changes are applied. fn post(&self) -> &H { match *self { PrePostHeader::Same(ref h) => h, @@ -142,7 +146,8 @@ impl PrePostHeader { } } - // convert to the "post-header" -- the header as it should be after all changes are applied. + /// convert to the "post-header" -- the header as it should be after + /// all changes are applied. fn into_post(self) -> H { match self { PrePostHeader::Same(h) => h, @@ -153,7 +158,7 @@ impl PrePostHeader { enum PrepareStorageChangesResult, Block: BlockT> { Discard(ImportResult), - Import(Option>>), + Import(Option>>), } /// Create an instance of in-memory client. @@ -235,7 +240,9 @@ pub fn new_with_backend( Block: BlockT, B: backend::LocalBackend + 'static, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone())?; + let call_executor = LocalCallExecutor::new( + backend.clone(), executor, spawn_handle, config.clone())?; + let extensions = ExecutionExtensions::new( Default::default(), keystore, @@ -411,7 +418,9 @@ impl Client where id: &BlockId, cht_size: NumberFor, ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)); + let proof_error = || sp_blockchain::Error::Backend( + format!("Failed to generate header proof for {:?}", id)); + let header = self.backend.blockchain().expect_header(*id)?; let block_num = *header.number(); let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; @@ -572,7 +581,8 @@ impl Client where Ok(StorageProof::merge(proofs)) } - /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). + /// Generates CHT-based proof for roots of changes tries at given blocks + /// (that are part of single CHT). 
fn changes_trie_roots_proof_at_cht( &self, cht_size: NumberFor, @@ -600,11 +610,12 @@ impl Client where Ok(proof) } - /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// Returns changes trie storage and all configurations that have been active + /// in the range [first; last]. /// /// Configurations are returned in descending order (and obviously never overlap). - /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and - /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, + /// starting from last and stopping on either first, or when CT have been disabled. /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled /// inside first..last blocks range. fn require_changes_trie( @@ -633,7 +644,8 @@ impl Client where break; } - current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); + current = *self.backend.blockchain() + .expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); } Ok((storage, configs)) @@ -646,7 +658,9 @@ impl Client where operation: &mut ClientImportOperation, import_block: BlockImportParams>, new_cache: HashMap>, - storage_changes: Option>>, + storage_changes: Option< + sc_consensus::StorageChanges> + >, ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, >::Api: CoreApi + @@ -738,7 +752,7 @@ impl Client where justifications: Option, body: Option>, indexed_body: Option>>, - storage_changes: Option>>, + storage_changes: Option>>, new_cache: HashMap>, finalized: bool, aux: Vec<(Vec, Option>)>, @@ -779,7 +793,7 @@ impl Client where let storage_changes = match storage_changes { Some(storage_changes) => { let storage_changes = match storage_changes { - sp_consensus::StorageChanges::Changes(storage_changes) => { + sc_consensus::StorageChanges::Changes(storage_changes) => { self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; let ( main_sc, @@ -804,7 +818,7 @@ impl Client where Some((main_sc, child_sc)) } - sp_consensus::StorageChanges::Import(changes) => { + sc_consensus::StorageChanges::Import(changes) => { let storage = sp_storage::Storage { top: changes.state.into_iter().collect(), children_default: Default::default(), @@ -882,7 +896,8 @@ impl Client where operation.op.insert_aux(aux)?; - // we only notify when we are already synced to the tip of the chain or if this import triggers a re-org + // we only notify when we are already synced to the tip of the chain + // or if this import triggers a re-org if make_notifications || tree_route.is_some() { if finalized { operation.notify_finalized.push(hash); @@ -919,10 +934,12 @@ impl Client where let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { - (BlockStatus::Unknown, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), - (BlockStatus::KnownBad, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, 
StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(_))) => + (BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_))) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::Execute) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), @@ -969,7 +986,7 @@ impl Client where { return Err(Error::InvalidStateRoot) } - Some(sp_consensus::StorageChanges::Changes(gen_storage_changes)) + Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, // No block body, no storage changes (true, None, None) => None, @@ -992,7 +1009,8 @@ impl Client where let last_finalized = self.backend.blockchain().last_finalized()?; if block == last_finalized { - warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); + warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", + last_finalized); return Ok(()); } @@ -1199,12 +1217,16 @@ impl Client where } /// Get block header by id. - pub fn header(&self, id: &BlockId) -> sp_blockchain::Result::Header>> { + pub fn header(&self, id: &BlockId) + -> sp_blockchain::Result::Header>> + { self.backend.blockchain().header(*id) } /// Get block body by id. - pub fn body(&self, id: &BlockId) -> sp_blockchain::Result::Extrinsic>>> { + pub fn body(&self, id: &BlockId) + -> sp_blockchain::Result::Extrinsic>>> + { self.backend.blockchain().body(*id) } @@ -1316,7 +1338,9 @@ impl ProofProvider for Client where }) } - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + fn header_proof(&self, id: &BlockId) + -> sp_blockchain::Result<(Block::Header, StorageProof)> + { self.header_proof_with_cht_size(id, cht::size()) } @@ -1410,7 +1434,8 @@ impl BlockBuilderProvider for Client + Send + Sync + 'static, Block: BlockT, Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: ApiExt> + >::Api: + ApiExt> + BlockBuilderApi, { fn new_block_at>( @@ -1466,7 +1491,9 @@ impl StorageProvider for Client wher E: CallExecutor, Block: BlockT, { - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { + fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) + -> sp_blockchain::Result> + { let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } @@ -1673,7 +1700,9 @@ impl ProvideUncles for Client where E: CallExecutor, Block: BlockT, { - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) + -> sp_blockchain::Result> + { Ok(Client::uncles(self, target_hash, max_generation)? .into_iter() .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) @@ -1822,7 +1851,7 @@ impl CallApiAt for Client where /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. 
#[async_trait::async_trait] -impl sp_consensus::BlockImport for &Client where +impl sc_consensus::BlockImport for &Client where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1931,7 +1960,7 @@ impl sp_consensus::BlockImport for &Client sp_consensus::BlockImport for Client where +impl sc_consensus::BlockImport for Client where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 40cb1aeea6a9e..93f319b250625 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -78,7 +78,7 @@ pub use sc_network::config::{ pub use sc_tracing::TracingReceiver; pub use task_manager::SpawnTaskHandle; pub use task_manager::TaskManager; -pub use sp_consensus::import_queue::ImportQueue; +pub use sc_consensus::ImportQueue; pub use self::client::{LocalCallExecutor, ClientConfig}; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index e7e627f919c12..d0081b3249117 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -31,6 +31,7 @@ futures = { version = "0.3.1", features = ["compat"] } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-network = { version = "0.10.0-dev", path = "../../network" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index bdd693f57b2d0..ef7159333f7bf 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -46,9 +46,10 @@ use sp_api::ProvideRuntimeApi; use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use sp_consensus::{ - BlockOrigin, SelectChain, BlockImport, Error as ConsensusError, BlockCheckParams, ImportResult, - BlockStatus, BlockImportParams, ForkChoiceStrategy, +use sp_consensus::{BlockOrigin, SelectChain, Error as ConsensusError, BlockStatus}; +use sc_consensus::{ + BlockImport, BlockCheckParams, ImportResult, + BlockImportParams, ForkChoiceStrategy, }; use sp_storage::{StorageKey, ChildInfo}; use sp_trie::{TrieConfiguration, trie_types::Layout}; diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 846bc68931bd0..b7b85ce084eb6 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -40,6 +40,7 @@ retain_mut = "0.1.3" assert_matches = "1.3.0" hex = "0.4" sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 5a9d1814bd63f..ab4f5a24f5c56 
100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -13,15 +13,14 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -thiserror = "1.0.21" -libp2p = { version = "0.37.1", default-features = false } +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +futures = { version = "0.3.1", features = ["thread-pool"] } log = "0.4.8" sp-core = { path= "../../core", version = "4.0.0-dev"} sp-inherents = { version = "4.0.0-dev", path = "../../inherents" } sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } -futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" sp-std = { version = "4.0.0-dev", path = "../../std" } sp-version = { version = "4.0.0-dev", path = "../../version" } @@ -29,12 +28,11 @@ sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } sp-utils = { version = "4.0.0-dev", path = "../../utils" } sp-trie = { version = "4.0.0-dev", path = "../../trie" } sp-api = { version = "4.0.0-dev", path = "../../api" } -codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} wasm-timer = "0.2.5" -async-trait = "0.1.50" +thiserror = "1.0.21" [dev-dependencies] futures = "0.3.9" diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 6cac6b1ff9201..12bd46f5e4ad6 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -15,275 +15,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Import Queue primitive: something which can verify and import blocks. -//! -//! This serves as an intermediate and abstracted step between synchronization -//! and import. Each mode of consensus will have its own requirements for block -//! verification. Some algorithms can verify in parallel, while others only -//! sequentially. -//! -//! The `ImportQueue` trait allows such verification strategies to be -//! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial -//! queues to be instantiated simply. - -use std::collections::HashMap; - -use sp_runtime::{Justifications, traits::{Block as BlockT, Header as _, NumberFor}}; - -use crate::{ - error::Error as ConsensusError, - block_import::{ - BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, ImportedState, StateAction, - }, - metrics::Metrics, -}; -pub use basic_queue::BasicQueue; - -/// A commonly-used Import Queue type. -/// -/// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. -pub type DefaultImportQueue = BasicQueue>; - -mod basic_queue; -pub mod buffered_link; - -/// Shared block import struct used by the queue. -pub type BoxBlockImport = Box< - dyn BlockImport + Send + Sync ->; - -/// Shared justification import struct used by the queue. -pub type BoxJustificationImport = Box + Send + Sync>; - -/// Maps to the Origin used by the network. -pub type Origin = libp2p::PeerId; - -/// Block data used by the queue. -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct IncomingBlock { - /// Block header hash. 
- pub hash: ::Hash, - /// Block header if requested. - pub header: Option<::Header>, - /// Block body if requested. - pub body: Option::Extrinsic>>, - /// Indexed block body if requested. - pub indexed_body: Option>>, - /// Justification(s) if requested. - pub justifications: Option, - /// The peer, we received this from - pub origin: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, - /// Skip block exection and state verification. - pub skip_execution: bool, - /// Re-validate existing block. - pub import_existing: bool, - /// Do not compute new state, but rather set it to the given set. - pub state: Option>, -} - /// Type of keys in the blockchain cache that consensus module could use for its needs. -pub type CacheKeyId = [u8; 4]; - -/// Verify a justification of a block -#[async_trait::async_trait] -pub trait Verifier: Send + Sync { - /// Verify the given data and return the BlockImportParams and an optional - /// new set of validators to import. If not, err with an Error-Message - /// presented to the User in the logs. - async fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String>; -} - -/// Blocks import queue API. -/// -/// The `import_*` methods can be called in order to send elements for the import queue to verify. -/// Afterwards, call `poll_actions` to determine how to respond to these elements. -pub trait ImportQueue: Send { - /// Import bunch of blocks. - fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); - /// Import block justifications. - fn import_justifications( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justifications: Justifications - ); - /// Polls for actions to perform on the network. - /// - /// This method should behave in a way similar to `Future::poll`. It can register the current - /// task and notify later when more actions are ready to be polled. To continue the comparison, - /// it is as if this method always returned `Poll::Pending`. - fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); -} - -/// Hooks that the verification queue can use to influence the synchronization -/// algorithm. -pub trait Link: Send { - /// Batch of blocks imported, with or without error. - fn blocks_processed( - &mut self, - _imported: usize, - _count: usize, - _results: Vec<(Result>, BlockImportError>, B::Hash)> - ) {} - /// Justification import result. - fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} - /// Request a justification for the given block. - fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} -} - -/// Block import successful result. -#[derive(Debug, PartialEq)] -pub enum BlockImportResult { - /// Imported known block. - ImportedKnown(N, Option), - /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), -} - -/// Block import error. -#[derive(Debug)] -pub enum BlockImportError { - /// Block missed header, can't be imported - IncompleteHeader(Option), - /// Block verification failed, can't be imported - VerificationFailed(Option, String), - /// Block is known to be Bad - BadBlock(Option), - /// Parent state is missing. - MissingState, - /// Block has an unknown parent - UnknownParent, - /// Block import has been cancelled. This can happen if the parent block fails to be imported. - Cancelled, - /// Other error. 
- Other(ConsensusError), -} - -/// Single block import function. -pub async fn import_single_block, Transaction: Send + 'static>( - import_handle: &mut impl BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, -) -> Result>, BlockImportError> { - import_single_block_metered(import_handle, block_origin, block, verifier, None).await -} - -/// Single block import function with metering. -pub(crate) async fn import_single_block_metered, Transaction: Send + 'static>( - import_handle: &mut impl BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, - metrics: Option, -) -> Result>, BlockImportError> { - let peer = block.origin; - - let (header, justifications) = match (block.header, block.justifications) { - (Some(header), justifications) => (header, justifications), - (None, _) => { - if let Some(ref peer) = peer { - debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); - } else { - debug!(target: "sync", "Header {} was not provided ", block.hash); - } - return Err(BlockImportError::IncompleteHeader(peer)) - }, - }; - - trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); - - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - - let import_handler = |import| { - match import { - Ok(ImportResult::AlreadyInChain) => { - trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number, peer.clone())) - }, - Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), - Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::MissingState) - }, - Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::UnknownParent) - }, - Ok(ImportResult::KnownBad) => { - debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer.clone())) - }, - Err(e) => { - debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); - Err(BlockImportError::Other(e)) - } - } - }; - - match import_handler(import_handle.check_block(BlockCheckParams { - hash, - number, - parent_hash, - allow_missing_state: block.allow_missing_state, - import_existing: block.import_existing, - }).await)? { - BlockImportResult::ImportedUnknown { .. } => (), - r => return Ok(r), // Any other successful result means that the block is already imported. 
- } - - let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier.verify( - block_origin, - header, - justifications, - block.body - ).await.map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; - - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(true, started.elapsed()); - } - - let mut cache = HashMap::new(); - if let Some(keys) = maybe_keys { - cache.extend(keys.into_iter()); - } - import_block.import_existing = block.import_existing; - import_block.indexed_body = block.indexed_body; - let mut import_block = import_block.clear_storage_changes_and_mutate(); - if let Some(state) = block.state { - import_block.state_action = StateAction::ApplyChanges(crate::StorageChanges::Import(state)); - } else if block.skip_execution { - import_block.state_action = StateAction::Skip; - } else if block.allow_missing_state { - import_block.state_action = StateAction::ExecuteIfPossible; - } - - let imported = import_handle.import_block(import_block, cache).await; - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification_and_import(started.elapsed()); - } - import_handler(imported) -} +pub type CacheKeyId = [u8; 4]; \ No newline at end of file diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 51b2a96e17758..9a0459973b9ed 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -21,14 +21,6 @@ //! change. Implementors of traits should not rely on the interfaces to remain //! the same. -// This provides "unused" building blocks to other crates -#![allow(dead_code)] - -// our error-chain could potentially blow up otherwise -#![recursion_limit="128"] - -#[macro_use] extern crate log; - use std::sync::Arc; use std::time::Duration; @@ -40,21 +32,13 @@ use sp_state_machine::StorageProof; pub mod block_validation; pub mod error; -pub mod block_import; mod select_chain; pub mod import_queue; pub mod evaluation; -mod metrics; pub use self::error::Error; -pub use block_import::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, - ImportResult, ImportedAux, ImportedState, JustificationImport, JustificationSyncLink, - StateAction, StorageChanges, -}; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; -pub use import_queue::DefaultImportQueue; pub use sp_inherents::InherentData; /// Block status. @@ -72,6 +56,23 @@ pub enum BlockStatus { Unknown, } +/// Block data origin. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum BlockOrigin { + /// Genesis block built into the client. + Genesis, + /// Block is part of the initial sync with the network. + NetworkInitialSync, + /// Block was broadcasted on the network. + NetworkBroadcast, + /// Block that was received from the network and validated in the consensus process. + ConsensusBroadcast, + /// Block that was collated by this node. + Own, + /// Block was imported from a file. + File, +} + /// Environment for a Consensus instance. /// /// Creates proposer instance. 
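Illustrative sketch (not part of this diff): with the split shown in the `sp_consensus` hunks above, `sp-consensus` keeps only node-agnostic primitives such as `BlockOrigin`, `Error` and `SelectChain` (and, per its Cargo.toml change, drops the `libp2p` dependency), while the client-side import machinery (`BlockImport`, `BlockImportParams`, `ForkChoiceStrategy`, `ImportQueue`, ...) now lives in `sc-consensus`. A minimal sketch of how downstream code imports from the two crates after the move; the helper name `import_own_block` and its bounds are illustrative, not something this PR adds:

use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy};
use sp_consensus::{BlockOrigin, Error as ConsensusError};
use sp_runtime::traits::Block as BlockT;

// Import a locally authored block through any client-side `BlockImport`,
// using the same calls the updated tests above rely on.
async fn import_own_block<B, I>(
    importer: &mut I,
    header: B::Header,
    body: Vec<B::Extrinsic>,
) -> Result<(), ConsensusError>
where
    B: BlockT,
    I: BlockImport<B, Error = ConsensusError>,
{
    // `BlockImportParams::new` and `fork_choice` are used exactly like this
    // in the updated network/service tests earlier in the diff.
    let mut params = BlockImportParams::new(BlockOrigin::Own, header);
    params.body = Some(body);
    params.fork_choice = Some(ForkChoiceStrategy::LongestChain);
    // The second argument is the (here empty) map of consensus cache keys.
    importer.import_block(params, Default::default()).await?;
    Ok(())
}

The same split is what the test-utils changes below apply: block-import types come from `sc_consensus`, while `BlockOrigin` and `Error` stay in `sp_consensus`.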
diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index edba96d760fc2..b3f53e80f4565 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -20,10 +20,8 @@ use sc_service::client::Client; use sc_client_api::backend::Finalizer; use sc_client_api::client::BlockBackend; -use sp_consensus::{ - BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, - ForkChoiceStrategy, -}; +use sc_consensus::{BlockImportParams, BlockImport, ForkChoiceStrategy}; +use sp_consensus::{BlockOrigin, Error as ConsensusError}; use sp_runtime::{Justification, Justifications}; use sp_runtime::traits::{Block as BlockT}; use sp_runtime::generic::BlockId; diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 2a4be6787dd7d..104322220fd6a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -52,6 +52,8 @@ serde = { version = "1.0.126", optional = true, features = ["derive"] } [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../client/block-builder" } sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } futures = "0.3.9" diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 24e9f8af29442..9f1dc32a64ffb 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-light = { version = "4.0.0-dev", path = "../../../client/light" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } @@ -23,6 +24,5 @@ sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } futures = "0.3.9" diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index f4c722ab12c2b..6393a33a5c252 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1260,11 +1260,11 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { #[cfg(test)] mod tests { use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, runtime::TestAPI, }; + use sp_consensus::BlockOrigin; use sp_api::ProvideRuntimeApi; use sp_runtime::generic::BlockId; use sp_core::storage::well_known_keys::HEAP_PAGES; diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs index 1976d132b7c50..142e47870c312 100644 --- a/test-utils/test-runner/src/lib.rs +++ b/test-utils/test-runner/src/lib.rs @@ -230,7 +230,8 @@ use 
sc_executor::NativeExecutionDispatch; use sc_service::TFullClient; use sp_api::{ConstructRuntimeApi, TransactionFor}; -use sp_consensus::{BlockImport, SelectChain}; +use sp_consensus::SelectChain; +use sc_consensus::BlockImport; use sp_inherents::InherentDataProvider; use sp_runtime::traits::{Block as BlockT, SignedExtension};
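Illustrative sketch (not part of this diff): the other change repeated throughout the hunks above is the rename of `sp_consensus::import_queue::BlockImportResult` to `sc_consensus::BlockImportStatus`, together with the move of the `Link` and `ImportQueue` traits into `sc-consensus`. A minimal `Link` implementation against the renamed types; the `CountingLink` struct and its fields are illustrative only:

use sc_consensus::{BlockImportError, BlockImportStatus, Link};
use sp_runtime::traits::{Block as BlockT, NumberFor};

// Tallies import-queue results, mirroring how the `Link` implementations in
// sc-network and sc-service are updated above to the renamed types.
#[derive(Default)]
struct CountingLink {
    imported: u64,
    failed: u64,
}

impl<B: BlockT> Link<B> for CountingLink {
    fn blocks_processed(
        &mut self,
        _imported: usize,
        _count: usize,
        results: Vec<(Result<BlockImportStatus<NumberFor<B>>, BlockImportError>, B::Hash)>,
    ) {
        for (result, _hash) in results {
            match result {
                // `ImportedKnown` and `ImportedUnknown` are the two success
                // variants of the renamed `BlockImportStatus` enum.
                Ok(BlockImportStatus::ImportedKnown(..))
                | Ok(BlockImportStatus::ImportedUnknown(..)) => self.imported += 1,
                Err(_) => self.failed += 1,
            }
        }
    }
    // `justification_imported` and `request_justification` keep their default
    // (no-op) implementations.
}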