From 4f484d7a0747ea0af8e36d1d2e577d7a20e02d4b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 8 Dec 2022 13:19:07 +0100 Subject: [PATCH 01/11] BlockId removal: refactor: HeaderBackend::header It changes the arguments of: - `HeaderBackend::header`, - `Client::header`, - `PeersClient::header` - `ChainApi::block_header` methods from: `BlockId` to: `Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) --- bin/node/bench/src/construct.rs | 2 +- bin/node/cli/src/service.rs | 5 +- bin/node/inspect/src/lib.rs | 7 +-- client/api/src/in_mem.rs | 10 ++-- .../authority-discovery/src/worker/tests.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 36 +++++------- client/consensus/aura/src/lib.rs | 4 +- client/consensus/babe/src/lib.rs | 4 +- client/consensus/babe/src/tests.rs | 40 ++++++------- client/consensus/common/src/longest_chain.rs | 7 +-- .../manual-seal/src/consensus/babe.rs | 4 +- .../manual-seal/src/consensus/timestamp.rs | 7 +-- client/consensus/manual-seal/src/lib.rs | 22 +++---- .../consensus/manual-seal/src/seal_block.rs | 7 +-- client/db/src/lib.rs | 57 ++++++++----------- client/finality-grandpa/src/environment.rs | 8 +-- client/finality-grandpa/src/import.rs | 8 +-- client/finality-grandpa/src/justification.rs | 7 +-- client/finality-grandpa/src/tests.rs | 10 ++-- client/finality-grandpa/src/voting_rule.rs | 28 ++++----- client/finality-grandpa/src/warp_proof.rs | 2 +- client/network/src/protocol.rs | 3 +- .../network/src/service/tests/chain_sync.rs | 2 +- client/network/sync/src/lib.rs | 2 +- client/network/test/src/block_import.rs | 3 +- client/network/test/src/lib.rs | 26 +++++---- client/network/test/src/sync.rs | 52 ++++++++--------- client/offchain/src/lib.rs | 2 +- client/rpc/src/chain/chain_full.rs | 4 +- client/rpc/src/chain/mod.rs | 10 ++-- client/rpc/src/dev/mod.rs | 2 +- client/service/src/builder.rs | 3 +- client/service/src/client/client.rs | 31 +++++----- client/service/test/src/client/mod.rs | 2 +- client/sync-state-rpc/src/lib.rs | 7 +-- client/tracing/src/block/mod.rs | 5 +- client/transaction-pool/benches/basics.rs | 2 +- client/transaction-pool/src/api.rs | 4 +- client/transaction-pool/src/graph/pool.rs | 2 +- client/transaction-pool/src/lib.rs | 2 +- client/transaction-pool/src/tests.rs | 2 +- primitives/blockchain/src/backend.rs | 10 ++-- primitives/session/src/lib.rs | 4 +- test-utils/runtime/client/src/trait_tests.rs | 11 ---- .../runtime/transaction-pool/src/lib.rs | 9 +-- .../benchmarking-cli/src/storage/write.rs | 7 +-- 46 files changed, 216 insertions(+), 268 deletions(-) diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 2e20a7acb1e38..f994b29b5e843 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -143,7 +143,7 @@ impl core::Benchmark for ConstructionBenchmark { proposer_factory.init( &context .client - .header(&BlockId::number(0)) + .header(context.client.chain_info().genesis_hash) .expect("Database error querying block #0") .expect("Block #0 should exist"), ), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e7b825a8e5ef1..cdee61af3f500 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -640,9 +640,8 @@ mod tests { Ok((node, setup_handles.unwrap())) }, |service, &mut (ref mut block_import, ref babe_link)| { - let parent_id = BlockId::number(service.client().chain_info().best_number); - let parent_header = 
service.client().header(&parent_id).unwrap().unwrap(); - let parent_hash = parent_header.hash(); + let parent_hash = service.client().chain_info().best_hash; + let parent_header = service.client().header(parent_hash).unwrap().unwrap(); let parent_number = *parent_header.number(); futures::executor::block_on(service.transaction_pool().maintain( diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 528dce14f46a5..3b9d7b2b1ff98 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -147,18 +147,17 @@ impl> Inspector .block_body(hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = - self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; + self.chain.header(hash)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, BlockAddress::Hash(hash) => { - let id = BlockId::hash(hash); - let not_found = format!("Could not find block {:?}", id); + let not_found = format!("Could not find block {:?}", BlockId::::Hash(hash)); let body = self .chain .block_body(hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = - self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; + self.chain.header(hash)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, }) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 5a3e25ab5987b..58add087074b6 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -225,7 +225,7 @@ impl Blockchain { /// Set an existing block as head. pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> { let header = self - .header(BlockId::Hash(hash))? + .header(hash)? .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?; self.apply_head(&header) @@ -336,11 +336,9 @@ impl Blockchain { impl HeaderBackend for Blockchain { fn header( &self, - id: BlockId, + hash: Block::Hash, ) -> sp_blockchain::Result::Header>> { - Ok(self - .id(id) - .and_then(|hash| self.storage.read().blocks.get(&hash).map(|b| b.header().clone()))) + Ok(self.storage.read().blocks.get(&hash).map(|b| b.header().clone())) } fn info(&self) -> blockchain::Info { @@ -387,7 +385,7 @@ impl HeaderMetadata for Blockchain { &self, hash: Block::Hash, ) -> Result, Self::Error> { - self.header(BlockId::hash(hash))? + self.header(hash)? 
.map(|header| CachedHeaderMetadata::from(&header)) .ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash)) diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 8e7a221877574..ea0c16eca8063 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -58,7 +58,7 @@ impl ProvideRuntimeApi for TestApi { impl HeaderBackend for TestApi { fn header( &self, - _id: BlockId, + _hash: Block::Hash, ) -> std::result::Result, sp_blockchain::Error> { Ok(None) } diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index b69294bf6ccb0..6fb17971f142e 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -617,8 +617,7 @@ mod tests { block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"), )), ); @@ -628,7 +627,7 @@ mod tests { let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), + &client.expect_header(BlockId::number(0)).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -672,7 +671,7 @@ mod tests { let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), + &client.expect_header(BlockId::number(0)).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -705,15 +704,13 @@ mod tests { ); let genesis_hash = client.info().best_hash; - let block_id = BlockId::Hash(genesis_hash); block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"), )), ); @@ -722,7 +719,7 @@ mod tests { ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = proposer_factory.init_with_now( - &client.header(&block_id).unwrap().unwrap(), + &client.header(genesis_hash).unwrap().unwrap(), Box::new(move || time::Instant::now()), ); @@ -734,7 +731,7 @@ mod tests { assert_eq!(proposal.block.extrinsics().len(), 1); let api = client.runtime_api(); - api.execute_block(&block_id, proposal.block).unwrap(); + api.execute_block(&BlockId::Hash(genesis_hash), proposal.block).unwrap(); let state = backend.state_at(genesis_hash).unwrap(); @@ -791,7 +788,7 @@ mod tests { expected_block_extrinsics, expected_pool_transactions| { let proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(number)).unwrap().unwrap(), + &client.expect_header(BlockId::number(number)).unwrap(), Box::new(move || time::Instant::now()), ); @@ -813,8 +810,7 @@ mod tests { block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"), )), ); @@ -827,8 +823,7 @@ mod tests { block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(1)) - .expect("header get error") + .expect_header(BlockId::Number(1)) .expect("there should be header"), )), ); @@ -851,8 +846,7 @@ mod tests { client.clone(), ); let genesis_header = client - 
.header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"); let extrinsics_num = 5; @@ -966,8 +960,7 @@ mod tests { block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"), )), ); @@ -978,7 +971,7 @@ mod tests { let cell = Mutex::new(time::Instant::now()); let proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), + &client.expect_header(BlockId::number(0)).unwrap(), Box::new(move || { let mut value = cell.lock(); let old = *value; @@ -1029,8 +1022,7 @@ mod tests { block_on( txpool.maintain(chain_event( client - .header(&BlockId::Number(0u64)) - .expect("header get error") + .expect_header(BlockId::Number(0u64)) .expect("there should be header"), )), ); @@ -1043,7 +1035,7 @@ mod tests { let cell = Arc::new(Mutex::new((0, time::Instant::now()))); let cell2 = cell.clone(); let proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), + &client.expect_header(BlockId::number(0)).unwrap(), Box::new(move || { let mut value = cell.lock(); let (called, old) = *value; diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 46b9124f9077f..92edb4ff62f47 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -967,7 +967,7 @@ mod tests { compatibility_mode: Default::default(), }; - let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); + let head = client.expect_header(BlockId::Number(0)).unwrap(); let res = runtime .block_on(worker.on_slot(SlotInfo { @@ -981,6 +981,6 @@ mod tests { .unwrap(); // The returned block should be imported and we should be able to get its header by now. - assert!(client.header(&BlockId::Hash(res.block.hash())).unwrap().is_some()); + assert!(client.header(res.block.hash()).unwrap().is_some()); } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 84b6893648f49..469214fa25638 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1417,7 +1417,7 @@ where let parent_hash = *block.header.parent_hash(); let parent_header = self .client - .header(BlockId::Hash(parent_hash)) + .header(parent_hash) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? .ok_or_else(|| { ConsensusError::ChainLookup( @@ -1654,7 +1654,7 @@ where let finalized_slot = { let finalized_header = client - .header(BlockId::Hash(info.finalized_hash)) + .header(info.finalized_hash) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.expect( "best finalized hash was given by client; finalized headers must exist in db; qed", diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 7f51eb2c51977..bdc9f5c096113 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -678,18 +678,18 @@ fn propose_and_import_blocks( client: &PeersFullClient, proposer_factory: &mut DummyFactory, block_import: &mut BoxBlockImport, - parent_id: BlockId, + parent_hash: Hash, n: usize, runtime: &Runtime, ) -> Vec { let mut hashes = Vec::with_capacity(n); - let mut parent_header = client.header(&parent_id).unwrap().unwrap(); + let mut parent_header = client.header(parent_hash).unwrap().unwrap(); for _ in 0..n { let block_hash = propose_and_import_block(&parent_header, None, proposer_factory, block_import, runtime); hashes.push(block_hash); - parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); + parent_header = client.header(block_hash).unwrap().unwrap(); } hashes @@ -713,7 +713,7 @@ fn importing_block_one_sets_genesis_epoch() { let mut block_import = data.block_import.lock().take().expect("import set up during init"); - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + let genesis_header = client.header(client.chain_info().genesis_hash).unwrap().unwrap(); let block_hash = propose_and_import_block( &genesis_header, @@ -779,10 +779,10 @@ fn revert_prunes_epoch_changes_and_removes_weights() { // \ revert *---- G(#13) ---- H(#19) ---#20 < fork #3 // \ to #10 // *-----E(#7)---#11 < fork #1 - let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 21); - let fork1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10); - let fork2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[7]), 10); - let fork3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[11]), 8); + let canon = propose_and_import_blocks_wrap(client.chain_info().genesis_hash, 21); + let fork1 = propose_and_import_blocks_wrap(canon[0], 10); + let fork2 = propose_and_import_blocks_wrap(canon[7], 10); + let fork3 = propose_and_import_blocks_wrap(canon[11], 8); // We should be tracking a total of 9 epochs in the fork tree assert_eq!(epoch_changes.shared_data().tree().iter().count(), 8); @@ -854,7 +854,7 @@ fn revert_not_allowed_for_finalized() { ) }; - let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3); + let canon = propose_and_import_blocks_wrap(client.chain_info().genesis_hash, 3); // Finalize best block client.finalize_block(canon[2], None, false).unwrap(); @@ -912,12 +912,12 @@ fn importing_epoch_change_block_prunes_tree() { // Create and import the canon chain and keep track of fork blocks (A, C, D) // from the diagram above. 
- let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 30); + let canon = propose_and_import_blocks_wrap(client.chain_info().genesis_hash, 30); // Create the forks - let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10); - let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[12]), 15); - let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[18]), 10); + let fork_1 = propose_and_import_blocks_wrap(canon[0], 10); + let fork_2 = propose_and_import_blocks_wrap(canon[12], 15); + let fork_3 = propose_and_import_blocks_wrap(canon[18], 10); // We should be tracking a total of 9 epochs in the fork tree assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9); @@ -928,7 +928,7 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). client.finalize_block(canon[12], None, false).unwrap(); - propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); + propose_and_import_blocks_wrap(client.chain_info().best_hash, 7); let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); @@ -941,7 +941,7 @@ fn importing_epoch_change_block_prunes_tree() { // finalizing block #25 from the canon chain should prune out the second fork client.finalize_block(canon[24], None, false).unwrap(); - propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); + propose_and_import_blocks_wrap(client.chain_info().best_hash, 8); let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); @@ -973,7 +973,7 @@ fn verify_slots_are_strictly_increasing() { mutator: Arc::new(|_, _| ()), }; - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + let genesis_header = client.header(client.chain_info().genesis_hash).unwrap().unwrap(); // we should have no issue importing this block let b1 = propose_and_import_block( @@ -984,7 +984,7 @@ fn verify_slots_are_strictly_increasing() { &runtime, ); - let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); + let b1 = client.header(b1).unwrap().unwrap(); // we should fail to import this block since the slot number didn't increase. // we will panic due to the `PanickingBlockImport` defined above. @@ -1077,9 +1077,9 @@ fn obsolete_blocks_aux_data_cleanup() { // G --- A1 --- A2 --- A3 --- A4 ( < fork1 ) // \-----C4 --- C5 ( < fork3 ) - let fork1_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 4); - let fork2_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 2); - let fork3_hashes = propose_and_import_blocks_wrap(BlockId::Number(3), 2); + let fork1_hashes = propose_and_import_blocks_wrap(client.chain_info().genesis_hash, 4); + let fork2_hashes = propose_and_import_blocks_wrap(client.chain_info().genesis_hash, 2); + let fork3_hashes = propose_and_import_blocks_wrap(fork1_hashes[2], 2); // Check that aux data is present for all but the genesis block. 
assert!(aux_data_check(&[client.chain_info().genesis_hash], false)); diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index 941cd4b944766..fab2c3a4c06fd 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -21,10 +21,7 @@ use sc_client_api::backend; use sp_blockchain::{Backend, HeaderBackend}; use sp_consensus::{Error as ConsensusError, SelectChain}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{marker::PhantomData, sync::Arc}; /// Implement Longest Chain Select implementation @@ -63,7 +60,7 @@ where Ok(self .backend .blockchain() - .header(BlockId::Hash(best_hash))? + .header(best_hash)? .expect("given block hash was fetched from block in db; qed")) } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 206f5163a13cd..f98808e2c870e 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -44,7 +44,7 @@ use sp_consensus_babe::{ use sp_consensus_slots::Slot; use sp_inherents::InherentData; use sp_runtime::{ - generic::{BlockId, Digest}, + generic::Digest, traits::{Block as BlockT, Header}, DigestItem, }; @@ -108,7 +108,7 @@ where let parent_hash = import_params.header.parent_hash(); let parent = self .client - .header(BlockId::Hash(*parent_hash)) + .header(*parent_hash) .ok() .flatten() .ok_or_else(|| format!("header for block {} not found", parent_hash))?; diff --git a/client/consensus/manual-seal/src/consensus/timestamp.rs b/client/consensus/manual-seal/src/consensus/timestamp.rs index 2cb9887a81f40..b3c2d780c35d9 100644 --- a/client/consensus/manual-seal/src/consensus/timestamp.rs +++ b/client/consensus/manual-seal/src/consensus/timestamp.rs @@ -30,10 +30,7 @@ use sp_consensus_aura::{ use sp_consensus_babe::BabeApi; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Zero}, -}; +use sp_runtime::traits::{Block as BlockT, Zero}; use sp_timestamp::{InherentType, INHERENT_IDENTIFIER}; use std::{ sync::{atomic, Arc}, @@ -109,7 +106,7 @@ impl SlotTimestampProvider { // otherwise we'd be producing blocks for older slots. let time = if info.best_number != Zero::zero() { let header = client - .header(BlockId::Hash(info.best_hash))? + .header(info.best_hash)? .ok_or_else(|| "best header not found in the db!".to_string())?; let slot = func(header)?; // add the slot duration so there's no collision of slots diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 09ab139b91c73..a5cc3007ce8ea 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -358,7 +358,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); + let genesis_hash = client.info().genesis_hash; let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -422,7 +422,8 @@ mod tests { } ); // assert that there's a new block in the db. 
- assert!(client.header(&BlockId::Number(1)).unwrap().is_some()) + assert!(client.header(created_block.hash).unwrap().is_some()); + assert_eq!(client.header(created_block.hash).unwrap().unwrap().number, 1) } #[tokio::test] @@ -431,7 +432,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); + let genesis_hash = client.info().genesis_hash; let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -492,7 +493,7 @@ mod tests { } ); // assert that there's a new block in the db. - let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); + let header = client.header(created_block.hash).unwrap().unwrap(); let (tx, rx) = futures::channel::oneshot::channel(); sink.send(EngineCommand::FinalizeBlock { sender: Some(tx), @@ -516,7 +517,7 @@ mod tests { &sp_core::testing::TaskExecutor::new(), )); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); + let genesis_hash = client.info().genesis_hash; let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -580,7 +581,8 @@ mod tests { assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); - let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); + let header = client.header(created_block.hash).expect("db error").expect("imported above"); + assert_eq!(header.number, 1); pool.maintain(sc_transaction_pool_api::ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None, @@ -612,7 +614,7 @@ mod tests { .is_ok()); let imported = rx2.await.unwrap().unwrap(); // assert that fork block is in the db - assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) + assert!(client.header(imported.hash).unwrap().is_some()) } #[tokio::test] @@ -621,7 +623,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); + let genesis_hash = client.header(client.info().genesis_hash).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -663,7 +665,7 @@ mod tests { let created_block = rx.await.unwrap().unwrap(); // assert that the background task returned the actual header hash - let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); - assert_eq!(header.hash(), created_block.hash); + let header = client.header(created_block.hash).unwrap().unwrap(); + assert_eq!(header.number, 1); } } diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 25c091bf460ec..d7289bdbe76f2 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -26,10 +26,7 @@ use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::HeaderBackend; use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{collections::HashMap, sync::Arc, time::Duration}; /// 
max duration for creating a proposal in secs @@ -103,7 +100,7 @@ pub async fn seal_block( // or fetch the best_block. let parent = match parent_hash { Some(hash) => client - .header(BlockId::Hash(hash))? + .header(hash)? .ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, None => select_chain.best_chain().await?, }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 426876f5cba8c..14e4304198c3f 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -524,21 +524,15 @@ impl BlockchainDb { } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { - fn header(&self, id: BlockId) -> ClientResult> { - match &id { - BlockId::Hash(h) => { - let mut cache = self.header_cache.lock(); - if let Some(result) = cache.get_refresh(h) { - return Ok(result.clone()) - } - let header = - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; - cache_header(&mut cache, *h, header.clone()); - Ok(header) - }, - BlockId::Number(_) => - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id), + fn header(&self, hash: Block::Hash) -> ClientResult> { + let mut cache = self.header_cache.lock(); + if let Some(result) = cache.get_refresh(&hash) { + return Ok(result.clone()) } + let header = + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::::Hash(hash))?; + cache_header(&mut cache, hash, header.clone()); + Ok(header) } fn info(&self) -> sc_client_api::blockchain::Info { @@ -557,7 +551,7 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => self.header(id)?.is_some(), + BlockId::Hash(hash) => self.header(hash)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -571,8 +565,8 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } fn hash(&self, number: NumberFor) -> ClientResult> { - self.header(BlockId::Number(number)) - .map(|maybe_header| maybe_header.map(|header| header.hash())) + Ok(utils::read_header::(&*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Number(number))? + .map(|header| header.hash())) } } @@ -736,7 +730,7 @@ impl HeaderMetadata for BlockchainDb { ) -> Result, Self::Error> { self.header_metadata_cache.header_metadata(hash).map_or_else( || { - self.header(BlockId::hash(hash))? + self.header(hash)? .map(|header| { let header_metadata = CachedHeaderMetadata::from(&header); self.header_metadata_cache @@ -1395,7 +1389,7 @@ impl Backend { .map(|(n, _)| n) .unwrap_or(Zero::zero()); let existing_header = - number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some(); + number <= highest_leaf && self.blockchain.header(hash)?.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1617,7 +1611,7 @@ impl Backend { } else if number > best_num + One::one() && number > One::one() && self .blockchain - .header(BlockId::hash(parent_hash))? + .header(parent_hash)? .is_none() { let gap = (best_num + One::one(), number - One::one()); @@ -1642,7 +1636,7 @@ impl Backend { if let Some(set_head) = operation.set_head { if let Some(header) = sc_client_api::blockchain::HeaderBackend::header( &self.blockchain, - BlockId::Hash(set_head), + set_head, )? { let number = header.number(); let hash = header.hash(); @@ -1767,10 +1761,9 @@ impl Backend { // that are canonical, but not yet finalized. So we stop deleting as soon as // we reach canonical chain. while self.blockchain.hash(number)? 
!= Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? { + match self.blockchain.header(hash)? { Some(header) => { - self.prune_block(transaction, id)?; + self.prune_block(transaction, BlockId::::hash(hash))?; number = header.number().saturating_sub(One::one()); hash = *header.parent_hash(); }, @@ -2141,7 +2134,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut transaction = Transaction::new(); let removed = - self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { + self.blockchain.header(hash_to_revert)?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( "Error reverting to {}. Block header not found.", hash_to_revert, @@ -3167,7 +3160,7 @@ pub(crate) mod tests { }; { - let header = backend.blockchain().header(BlockId::Hash(hash1)).unwrap().unwrap(); + let header = backend.blockchain().header(hash1).unwrap().unwrap(); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap(); backend.commit_operation(op).unwrap(); @@ -3574,26 +3567,26 @@ pub(crate) mod tests { assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); assert!(backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); + assert!(backend.blockchain().header(blocks[3]).unwrap().is_some()); backend.remove_leaf_block(blocks[3]).unwrap(); assert!(!backend.have_state_at(blocks[3], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); + assert!(backend.blockchain().header(blocks[3]).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); assert!(backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); + assert!(backend.blockchain().header(blocks[2]).unwrap().is_some()); backend.remove_leaf_block(blocks[2]).unwrap(); assert!(!backend.have_state_at(blocks[2], 2)); - assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); + assert!(backend.blockchain().header(blocks[2]).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); assert!(backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); + assert!(backend.blockchain().header(blocks[1]).unwrap().is_some()); backend.remove_leaf_block(blocks[1]).unwrap(); assert!(!backend.have_state_at(blocks[1], 1)); - assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); + assert!(backend.blockchain().header(blocks[1]).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f235c3a86c04e..c9d76f932cc7f 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -525,7 +525,7 @@ where Some((_, n)) if n > best_block_number => best_block_hash, Some((h, _)) => { // this is the header at which the new set will start - let header = self.client.header(BlockId::Hash(h))?.expect( + let header = self.client.header(h)?.expect( "got block hash from registered 
pending change; \ pending changes are only registered on block import; qed.", ); @@ -1155,7 +1155,7 @@ where SelectChain: SelectChainT + 'static, VotingRule: VotingRuleT, { - let base_header = match client.header(BlockId::Hash(block))? { + let base_header = match client.header(block)? { Some(h) => h, None => { debug!(target: "afg", @@ -1177,7 +1177,7 @@ where let result = match select_chain.finality_target(block, None).await { Ok(best_hash) => { let best_header = client - .header(BlockId::Hash(best_hash))? + .header(best_hash)? .expect("Header known to exist after `finality_target` call; qed"); // check if our vote is currently being limited due to a pending change @@ -1201,7 +1201,7 @@ where } target_header = client - .header(BlockId::Hash(*target_header.parent_hash()))? + .header(*target_header.parent_hash())? .expect("Header known to exist after `finality_target` call; qed"); } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 3715287eea31f..4e7620f942bd8 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -121,7 +121,7 @@ where }; if let Ok(hash) = effective_block_hash { - if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { + if let Ok(Some(header)) = self.inner.header(hash) { if *header.number() == pending_change.effective_number() { out.push((header.hash(), *header.number())); } @@ -363,14 +363,12 @@ where // best finalized block. let best_finalized_number = self.inner.info().finalized_number; let canon_number = best_finalized_number.min(median_last_finalized_number); - let canon_hash = - self.inner.header(BlockId::Number(canon_number)) + let canon_hash = self.inner.hash(canon_number) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? .expect( "the given block number is less or equal than the current best finalized number; \ current best finalized number must exist in chain; qed." - ) - .hash(); + ); NewAuthoritySet { canon_number, diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 56b26c964ce9b..96c31d4c0cc29 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -26,10 +26,7 @@ use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; use parity_scale_codec::{Decode, Encode}; use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_finality_grandpa::AuthorityId; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use crate::{AuthorityList, Commit, Error}; @@ -104,7 +101,7 @@ impl GrandpaJustification { break } - match client.header(BlockId::Hash(current_hash))? { + match client.header(current_hash)? 
{ Some(current_header) => { // NOTE: this should never happen as we pick the lowest block // as base and only traverse backwards from the other blocks diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 6b577fd712930..2d9434cd2132f 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1563,10 +1563,10 @@ fn justification_with_equivocation() { // we create a basic chain with 3 blocks (no forks) net.peer(0).push_blocks(3, false); - let client = net.peer(0).client().clone(); - let block1 = client.header(&BlockId::Number(1)).ok().flatten().unwrap(); - let block2 = client.header(&BlockId::Number(2)).ok().flatten().unwrap(); - let block3 = client.header(&BlockId::Number(3)).ok().flatten().unwrap(); + let client = net.peer(0).client().as_client().clone(); + let block1 = client.expect_header(BlockId::Number(1)).unwrap(); + let block2 = client.expect_header(BlockId::Number(2)).unwrap(); + let block3 = client.expect_header(BlockId::Number(3)).unwrap(); let set_id = 0; let justification = { @@ -1609,7 +1609,7 @@ fn justification_with_equivocation() { precommits, }; - GrandpaJustification::from_commit(&client.as_client(), round, commit).unwrap() + GrandpaJustification::from_commit(&client, round, commit).unwrap() }; // the justification should include the minimal necessary vote ancestry and diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index fb7754fc0169a..6f15a9cdd349e 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -27,10 +27,7 @@ use std::{future::Future, pin::Pin, sync::Arc}; use dyn_clone::DynClone; use sc_client_api::blockchain::HeaderBackend; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, One, Zero}, -}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; /// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. pub type VotingRuleResult = @@ -197,7 +194,7 @@ where target_hash = *target_header.parent_hash(); target_header = backend - .header(BlockId::Hash(target_hash)) + .header(target_hash) .ok()? 
.expect("Header known to exist due to the existence of one of its descendents; qed"); } @@ -242,7 +239,7 @@ where restricted_number >= base.number() && restricted_number < restricted_target.number() }) - .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) + .and_then(|(hash, _)| backend.header(hash).ok()) .and_then(std::convert::identity) { restricted_target = header; @@ -371,16 +368,18 @@ mod tests { let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build(); let mut client = Arc::new(TestClientBuilder::new().build()); + let mut hashes = Vec::with_capacity(200); for _ in 0..200 { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + hashes.push(block.hash()); futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } - let genesis = client.header(&BlockId::Number(0u32.into())).unwrap().unwrap(); + let genesis = client.header(client.info().genesis_hash).unwrap().unwrap(); - let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); + let best = client.header(client.info().best_hash).unwrap().unwrap(); let (_, number) = futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best)) @@ -390,7 +389,7 @@ mod tests { // which means that we should be voting for block #100 assert_eq!(number, 100); - let block110 = client.header(&BlockId::Number(110u32.into())).unwrap().unwrap(); + let block110 = client.header(hashes[109]).unwrap().unwrap(); let (_, number) = futures::executor::block_on(rule.restrict_vote( client.clone(), @@ -412,17 +411,20 @@ mod tests { let mut client = Arc::new(TestClientBuilder::new().build()); - for _ in 0..5 { + let n = 5; + let mut hashes = Vec::with_capacity(n); + for _ in 0..n { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + hashes.push(block.hash()); futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } - let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); + let best = client.header(client.info().best_hash).unwrap().unwrap(); let best_number = *best.number(); - for i in 0u32..5 { - let base = client.header(&BlockId::Number(i.into())).unwrap().unwrap(); + for i in 0..n { + let base = client.header(hashes[i]).unwrap().unwrap(); let (_, number) = futures::executor::block_on(rule.restrict_vote( client.clone(), &base, diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index c9f762fc7d593..43d9767bf279b 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -172,7 +172,7 @@ impl WarpSyncProof { }); if let Some(latest_justification) = latest_justification { - let header = blockchain.header(BlockId::Hash(latest_justification.target().1))? + let header = blockchain.header(latest_justification.target().1)? 
.expect("header hash corresponds to a justification in db; must exist in db as well; qed."); proofs.push(WarpSyncFragment { header, justification: latest_justification }) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 8c1dd39b49be3..42eecbe0feb39 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -52,7 +52,6 @@ use sc_network_common::{ use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::BlockOrigin; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, Justifications, }; @@ -638,7 +637,7 @@ where /// In chain-based consensus, we often need to make sure non-best forks are /// at least temporarily synced. pub fn announce_block(&mut self, hash: B::Hash, data: Option>) { - let header = match self.chain.header(BlockId::Hash(hash)) { + let header = match self.chain.header(hash) { Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs index bd4967f25973a..a3b221a43d04e 100644 --- a/client/network/src/service/tests/chain_sync.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -198,7 +198,7 @@ async fn on_block_finalized() { let mut chain_sync = Box::new(MockChainSync::::new()); - let at = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap().hash(); + let at = client.header(client.info().best_hash).unwrap().unwrap().hash(); let block = client .new_block_at(&BlockId::Hash(at), Default::default(), false) .unwrap() diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 697445334a073..349c73a4dde11 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1186,7 +1186,7 @@ where heads.sort(); let median = heads[heads.len() / 2]; if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { - if let Ok(Some(header)) = self.client.header(BlockId::hash(*hash)) { + if let Ok(Some(header)) = self.client.header(*hash) { log::debug!( target: "sync", "Starting state sync for #{} ({})", diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index b86f6787f30b5..250f5f5fd102c 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -26,7 +26,6 @@ use sc_consensus::{ IncomingBlock, }; use sp_consensus::BlockOrigin; -use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ self, prelude::*, @@ -39,7 +38,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) block_on(client.import(BlockOrigin::File, block)).unwrap(); let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); - let header = client.header(&BlockId::Number(1)).unwrap(); + let header = client.header(hash).unwrap(); let justifications = client.justifications(hash).unwrap(); let peer_id = PeerId::random(); ( diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index d3642e69cb632..43b36e361e4ee 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -163,17 +163,23 @@ impl PeersClient { pub fn header( &self, - block: &BlockId, + hash: ::Hash, ) -> ClientResult::Header>> { - self.client.header(block) + self.client.header(hash) } pub fn has_state_at(&self, block: &BlockId) -> bool { - let header = match self.header(block).unwrap() { - Some(header) => header, - None => return false, + let (number, hash) 
= match *block { + BlockId::Hash(h) => match self.as_client().number(h) { + Ok(Some(n)) => (n,h), + _ => return false, + } + BlockId::Number(n) => match self.as_client().hash(n) { + Ok(Some(h)) => (n,h), + _ => return false, + } }; - self.backend.have_state_at(header.hash(), *header.number()) + self.backend.have_state_at(hash, number) } pub fn justifications( @@ -361,7 +367,7 @@ where ) -> Block, { let full_client = self.client.as_client(); - let mut at = full_client.header(&at).unwrap().unwrap().hash(); + let mut at = full_client.block_hash_from_id(&at).unwrap().unwrap(); for _ in 0..count { let builder = full_client.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); @@ -397,7 +403,7 @@ where if inform_sync_about_new_best_block { self.network.new_best_block_imported( at, - *full_client.header(&BlockId::Hash(at)).ok().flatten().unwrap().number(), + *full_client.header(at).ok().flatten().unwrap().number(), ); } at @@ -539,7 +545,7 @@ where pub fn has_block(&self, hash: H256) -> bool { self.backend .as_ref() - .map(|backend| backend.blockchain().header(BlockId::hash(hash)).unwrap().is_some()) + .map(|backend| backend.blockchain().header(hash).unwrap().is_some()) .unwrap_or(false) } @@ -663,7 +669,7 @@ impl WarpSyncProvider for TestWarpSyncProvider { _start: B::Hash, ) -> Result> { let info = self.0.info(); - let best_header = self.0.header(BlockId::hash(info.best_hash)).unwrap().unwrap(); + let best_header = self.0.header(info.best_hash).unwrap().unwrap(); Ok(EncodedProof(best_header.encode())) } fn verify( diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index efe0e0577c11e..22598345d037c 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -259,9 +259,9 @@ fn sync_justifications() { runtime.block_on(net.wait_until_sync()); let backend = net.peer(0).client().as_backend(); - let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); - let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); - let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); + let hashof10 = backend.blockchain().hash(10).unwrap().unwrap(); + let hashof15 = backend.blockchain().hash(15).unwrap().unwrap(); + let hashof20 = backend.blockchain().hash(20).unwrap().unwrap(); // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -273,9 +273,9 @@ fn sync_justifications() { net.peer(0).client().finalize_block(hashof15, Some(just.clone()), true).unwrap(); net.peer(0).client().finalize_block(hashof20, Some(just.clone()), true).unwrap(); - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); - let hashof15 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap().hash(); - let hashof20 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap().hash(); + let hashof10 = net.peer(1).client().as_client().hash(10).unwrap().unwrap(); + let hashof15 = net.peer(1).client().as_client().hash(15).unwrap().unwrap(); + let hashof20 = net.peer(1).client().as_client().hash(20).unwrap().unwrap(); // peer 1 should get the justifications from the network net.peer(1).request_justification(&hashof10, 10); @@ -417,8 +417,8 @@ fn can_sync_small_non_best_forks() { net.peer(1).push_blocks(10, false); assert_eq!(net.peer(1).client().info().best_number, 40); - 
assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -434,8 +434,8 @@ fn can_sync_small_non_best_forks() { assert_eq!(net.peer(0).client().info().best_number, 40); - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + assert!(!net.peer(1).client().header(small_hash).unwrap().is_some()); net.peer(0).announce_block(small_hash, None); @@ -444,8 +444,8 @@ fn can_sync_small_non_best_forks() { runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + if net.peer(1).client().header(small_hash).unwrap().is_none() { return Poll::Pending } Poll::Ready(()) @@ -456,7 +456,7 @@ fn can_sync_small_non_best_forks() { net.peer(0).announce_block(another_fork, None); runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { + if net.peer(1).client().header(another_fork).unwrap().is_none() { return Poll::Pending } Poll::Ready(()) @@ -481,7 +481,7 @@ fn can_sync_forks_ahead_of_the_best_chain() { ); // Peer 1 is on 1-block fork net.peer(1).push_blocks(1, false); - assert!(net.peer(0).client().header(&BlockId::Hash(fork_hash)).unwrap().is_some()); + assert!(net.peer(0).client().header(fork_hash).unwrap().is_some()); assert_eq!(net.peer(0).client().info().best_number, 1); assert_eq!(net.peer(1).client().info().best_number, 2); @@ -489,7 +489,7 @@ fn can_sync_forks_ahead_of_the_best_chain() { runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(fork_hash)).unwrap().is_none() { + if net.peer(1).client().header(fork_hash).unwrap().is_none() { return Poll::Pending } Poll::Ready(()) @@ -515,8 +515,8 @@ fn can_sync_explicit_forks() { net.peer(1).push_blocks(10, false); assert_eq!(net.peer(1).client().info().best_number, 40); - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -532,8 +532,8 @@ fn can_sync_explicit_forks() { assert_eq!(net.peer(0).client().info().best_number, 40); - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + assert!(!net.peer(1).client().header(small_hash).unwrap().is_some()); // request explicit sync let 
first_peer_id = net.peer(0).id(); @@ -543,8 +543,8 @@ fn can_sync_explicit_forks() { runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { + assert!(net.peer(0).client().header(small_hash).unwrap().is_some()); + if net.peer(1).client().header(small_hash).unwrap().is_none() { return Poll::Pending } Poll::Ready(()) @@ -636,7 +636,7 @@ fn imports_stale_once() { runtime.block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { + if net.peer(1).client().header(hash).unwrap().is_some() { Poll::Ready(()) } else { Poll::Pending @@ -989,7 +989,7 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { net.peer(0).push_blocks(10, false); runtime.block_on(net.wait_until_sync()); - let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); + let hashof10 = net.peer(1).client().as_client().hash(10).unwrap().unwrap(); // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); @@ -1063,8 +1063,8 @@ fn syncs_all_forks_from_single_peer() { runtime.block_on(net.wait_until_sync()); // Peer 1 should have both branches, - assert!(net.peer(1).client().header(&BlockId::Hash(branch1)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(branch2)).unwrap().is_some()); + assert!(net.peer(1).client().header(branch1).unwrap().is_some()); + assert!(net.peer(1).client().header(branch2).unwrap().is_some()); } #[test] @@ -1089,7 +1089,7 @@ fn syncs_after_missing_announcement() { let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false); net.peer(1).push_blocks_at(BlockId::Number(10), 1, true); runtime.block_on(net.wait_until_sync()); - assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); + assert!(net.peer(1).client().header(final_block).unwrap().is_some()); } #[test] diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 87e79833b9706..6b28d3f8a48e9 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -375,7 +375,7 @@ mod tests { client.clone(), )); let network = Arc::new(TestNetwork()); - let header = client.header(&BlockId::number(0)).unwrap().unwrap(); + let header = client.header(client.chain_info().genesis_hash).unwrap().unwrap(); // when let offchain = OffchainWorkers::new(client); diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index 375e724a33d69..ee7870e0629e0 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -62,7 +62,7 @@ where } fn header(&self, hash: Option) -> Result, Error> { - self.client.header(BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err) + self.client.header(self.unwrap_or_best(hash)).map_err(client_err) } fn block(&self, hash: Option) -> Result>, Error> { @@ -130,7 +130,7 @@ fn subscribe_headers( { // send current head right at the start. 
let maybe_header = client - .header(BlockId::Hash(best_block_hash())) + .header(best_block_hash()) .map_err(client_err) .and_then(|header| header.ok_or_else(|| Error::Other("Best header missing.".into()))) .map_err(|e| log::warn!("Best header error {:?}", e)) diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index be06b91ca747f..d5c8ce5550112 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -31,8 +31,8 @@ use jsonrpsee::{core::RpcResult, types::SubscriptionResult, SubscriptionSink}; use sc_client_api::BlockchainEvents; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ - generic::{BlockId, SignedBlock}, - traits::{Block as BlockT, Header, NumberFor}, + generic::SignedBlock, + traits::{Block as BlockT, NumberFor}, }; use self::error::Error; @@ -80,11 +80,9 @@ where )) })?; let block_num = >::from(block_num); - Ok(self + self .client() - .header(BlockId::number(block_num)) - .map_err(client_err)? - .map(|h| h.hash())) + .hash(block_num).map_err(client_err) }, } } diff --git a/client/rpc/src/dev/mod.rs b/client/rpc/src/dev/mod.rs index 7f4b68f56f6f6..ec9e61678fd71 100644 --- a/client/rpc/src/dev/mod.rs +++ b/client/rpc/src/dev/mod.rs @@ -87,7 +87,7 @@ where let parent_hash = *block.header().parent_hash(); let parent_header = self .client - .header(BlockId::Hash(parent_hash)) + .header(parent_hash) .map_err(|e| Error::BlockQueryError(Box::new(e)))?; if let Some(header) = parent_header { header diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index dd89ce6dff10a..c52c4e862f42d 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -70,7 +70,6 @@ use sp_consensus::block_validation::{ use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}, BuildStorage, }; @@ -456,7 +455,7 @@ where sp_session::generate_initial_session_keys( client.clone(), - &BlockId::Hash(chain_info.best_hash), + chain_info.best_hash, config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), ) .map_err(|e| Error::Application(Box::new(e)))?; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 8ded5ec95c166..e81e1aeb52ae9 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -913,7 +913,7 @@ where let header = self .backend .blockchain() - .header(BlockId::Hash(block))? + .header(block)? .expect("Block to finalize expected to be onchain; qed"); operation.notify_finalized = Some(FinalizeSummary { header, finalized, stale_heads }); @@ -1049,9 +1049,9 @@ where /// Get block header by id. pub fn header( &self, - id: &BlockId, + hash: Block::Hash, ) -> sp_blockchain::Result::Header>> { - self.backend.blockchain().header(*id) + self.backend.blockchain().header(hash) } /// Get block body by id. @@ -1068,11 +1068,11 @@ where target_hash: Block::Hash, max_generation: NumberFor, ) -> sp_blockchain::Result> { - let load_header = |id: Block::Hash| -> sp_blockchain::Result { + let load_header = |hash: Block::Hash| -> sp_blockchain::Result { self.backend .blockchain() - .header(BlockId::Hash(id))? - .ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))) + .header(hash)? 
+ .ok_or_else(|| Error::UnknownBlock(format!("{:?}", hash))) }; let genesis_hash = self.backend.blockchain().info().genesis_hash; @@ -1548,7 +1548,7 @@ where ) -> sp_blockchain::Result> { Ok(Client::uncles(self, target_hash, max_generation)? .into_iter() - .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) + .filter_map(|hash| Client::header(self, hash).unwrap_or(None)) .collect()) } } @@ -1560,8 +1560,8 @@ where Block: BlockT, RA: Send + Sync, { - fn header(&self, id: BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().header(id) + fn header(&self, hash: Block::Hash) -> sp_blockchain::Result> { + self.backend.blockchain().header(hash) } fn info(&self) -> blockchain::Info { @@ -1612,8 +1612,8 @@ where Block: BlockT, RA: Send + Sync, { - fn header(&self, id: BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().header(id) + fn header(&self, hash: Block::Hash) -> sp_blockchain::Result> { + self.backend.blockchain().header(hash) } fn info(&self) -> blockchain::Info { @@ -1944,11 +1944,10 @@ where } fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { - Ok(match self.header(id)? { - Some(header) => { - let hash = header.hash(); - match (self.body(hash)?, self.justifications(hash)?) { - (Some(extrinsics), justifications) => + Ok(match self.backend.blockchain().block_hash_from_id(id)? { + Some(hash) => { + match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { + (Some(header), Some(extrinsics), justifications) => Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index be9253d8c78e8..edca7e48aa5c7 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1147,7 +1147,7 @@ fn get_header_by_block_number_doesnt_panic() { // backend uses u32 for block numbers, make sure we don't panic when // trying to convert let id = BlockId::::Number(72340207214430721); - client.header(&id).expect_err("invalid block number overflows u32"); + client.expect_header(id).expect_err("invalid block number overflows u32"); } #[test] diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index 9540d94c57918..9edc2ceecb78f 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -48,10 +48,7 @@ use jsonrpsee::{ }; use sc_client_api::StorageData; use sp_blockchain::HeaderBackend; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::sync::Arc; type SharedAuthoritySet = @@ -164,7 +161,7 @@ where let finalized_hash = self.client.info().finalized_hash; let finalized_header = self .client - .header(BlockId::Hash(finalized_hash))? + .header(finalized_hash)? .ok_or_else(|| sp_blockchain::Error::MissingHeader(finalized_hash.to_string()))?; let finalized_block_weight = diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 63fd1de374cba..89c0945e0eb03 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -217,10 +217,9 @@ where pub fn trace_block(&self) -> TraceBlockResult { tracing::debug!(target: "state_tracing", "Tracing block: {}", self.block); // Prepare the block - let id = BlockId::Hash(self.block); let mut header = self .client - .header(id) + .header(self.block) .map_err(Error::InvalidBlockId)? 
.ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; let extrinsics = self @@ -298,7 +297,7 @@ where }) } else { TraceBlockResponse::BlockTrace(BlockTrace { - block_hash: block_id_as_string(id), + block_hash: block_id_as_string(BlockId::::Hash(self.block)), parent_hash: block_id_as_string(parent_id), tracing_targets: targets.to_string(), storage_keys: self.storage_keys.clone().unwrap_or_default(), diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index 602e84b47775c..97d8edddffdbc 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -117,7 +117,7 @@ impl ChainApi for TestApi { fn block_header( &self, - _: &BlockId, + _: ::Hash, ) -> Result::Header>, Self::Error> { Ok(None) } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index c3f9b50f9482d..a2b317cfb3998 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -190,9 +190,9 @@ where fn block_header( &self, - at: &BlockId, + hash: ::Hash, ) -> Result::Header>, Self::Error> { - self.client.header(*at).map_err(Into::into) + self.client.header(hash).map_err(Into::into) } fn tree_route( diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 9c747ade1229a..96cbeb9b80cfb 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -96,7 +96,7 @@ pub trait ChainApi: Send + Sync { /// Returns a block header given the block id. fn block_header( &self, - at: &BlockId, + at: ::Hash, ) -> Result::Header>, Self::Error>; /// Compute a tree-route between two blocks. See [`TreeRoute`] for more details. diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 0124038b75949..f3797b180f14d 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -556,7 +556,7 @@ async fn prune_known_txs_for_block h, Ok(None) => { log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); diff --git a/client/transaction-pool/src/tests.rs b/client/transaction-pool/src/tests.rs index d8732077eba52..d399367eae818 100644 --- a/client/transaction-pool/src/tests.rs +++ b/client/transaction-pool/src/tests.rs @@ -170,7 +170,7 @@ impl ChainApi for TestApi { fn block_header( &self, - _: &BlockId, + _: ::Hash, ) -> Result::Header>, Self::Error> { Ok(None) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 33edc56d4b6ba..94d696c58392f 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -33,7 +33,7 @@ use crate::error::{Error, Result}; /// Blockchain database header backend. Does not perform any validation. pub trait HeaderBackend: Send + Sync { /// Get block header. Returns `None` if block is not found. - fn header(&self, id: BlockId) -> Result>; + fn header(&self, hash: Block::Hash) -> Result>; /// Get blockchain info. fn info(&self) -> Info; /// Get block status. @@ -64,7 +64,7 @@ pub trait HeaderBackend: Send + Sync { /// Get block header. Returns `UnknownBlock` error if block is not found. fn expect_header(&self, id: BlockId) -> Result { - self.header(id)? + self.header(self.expect_block_hash_from_id(&id)?)? 
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) } @@ -201,7 +201,7 @@ pub trait Backend: import_lock: &RwLock<()>, ) -> Result> { let target_header = { - match self.header(BlockId::Hash(target_hash))? { + match self.header(target_hash)? { Some(x) => x, // target not in blockchain None => return Ok(None), @@ -256,7 +256,7 @@ pub trait Backend: if let Some(max_number) = maybe_max_number { loop { let current_header = self - .header(BlockId::Hash(current_hash))? + .header(current_hash)? .ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?; if current_header.number() <= &max_number { @@ -276,7 +276,7 @@ pub trait Backend: } let current_header = self - .header(BlockId::Hash(current_hash))? + .header(current_hash)? .ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?; // stop search in this chain once we go below the target's block number diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index dde262738ad71..9b403409c9151 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -110,7 +110,7 @@ impl GetValidatorCount for MembershipProof { #[cfg(feature = "std")] pub fn generate_initial_session_keys( client: std::sync::Arc, - at: &BlockId, + at: Block::Hash, seeds: Vec, ) -> Result<(), sp_api::ApiError> where @@ -125,7 +125,7 @@ where let runtime_api = client.runtime_api(); for seed in seeds { - runtime_api.generate_session_keys(at, Some(seed.as_bytes().to_vec()))?; + runtime_api.generate_session_keys(&BlockId::Hash(at), Some(seed.as_bytes().to_vec()))?; } Ok(()) diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index 65aa3e65e6141..19772c8c94067 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -430,21 +430,10 @@ where let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(blockchain.header(BlockId::Number(0)).unwrap().unwrap().hash(), genesis_hash); assert_eq!(blockchain.hash(0).unwrap().unwrap(), genesis_hash); - - assert_eq!(blockchain.header(BlockId::Number(1)).unwrap().unwrap().hash(), a1.hash()); assert_eq!(blockchain.hash(1).unwrap().unwrap(), a1.hash()); - - assert_eq!(blockchain.header(BlockId::Number(2)).unwrap().unwrap().hash(), a2.hash()); assert_eq!(blockchain.hash(2).unwrap().unwrap(), a2.hash()); - - assert_eq!(blockchain.header(BlockId::Number(3)).unwrap().unwrap().hash(), a3.hash()); assert_eq!(blockchain.hash(3).unwrap().unwrap(), a3.hash()); - - assert_eq!(blockchain.header(BlockId::Number(4)).unwrap().unwrap().hash(), a4.hash()); assert_eq!(blockchain.hash(4).unwrap().unwrap(), a4.hash()); - - assert_eq!(blockchain.header(BlockId::Number(5)).unwrap().unwrap().hash(), a5.hash()); assert_eq!(blockchain.hash(5).unwrap().unwrap(), a5.hash()); } diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index f8d551a6fa5bd..aadd83c00e351 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -326,14 +326,9 @@ impl sc_transaction_pool::ChainApi for TestApi { fn block_header( &self, - at: &BlockId, + hash: ::Hash, ) -> Result::Header>, Self::Error> { - Ok(match at { - BlockId::Number(num) => - self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()), - BlockId::Hash(hash) => - self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), - }) + Ok(self.chain.read().block_by_hash.get(&hash).map(|b| b.header().clone())) } fn 
tree_route( diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 55a7b60d55552..1917bc5f8fd07 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -21,10 +21,7 @@ use sc_client_db::{DbHash, DbState, DbStateBuilder}; use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; use sp_trie::PrefixedMemoryDB; use log::{info, trace}; @@ -58,7 +55,7 @@ impl StorageCmd { let mut record = BenchRecord::default(); let best_hash = client.usage_info().chain.best_hash; - let header = client.header(BlockId::Hash(best_hash))?.ok_or("Header not found")?; + let header = client.header(best_hash)?.ok_or("Header not found")?; let original_root = *header.state_root(); let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); From d06359cab8e6eb78d625b7520b37496252139a22 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 8 Dec 2022 16:12:43 +0100 Subject: [PATCH 02/11] non-trivial usages of haeder(block_id) refactored This may required introduction of dedicated function: header_for_block_num --- client/finality-grandpa/src/warp_proof.rs | 8 ++++++-- client/network/sync/src/block_request_handler.rs | 10 +++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 43d9767bf279b..c21684d5baf96 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -116,8 +116,12 @@ impl WarpSyncProof { let set_changes = set_changes.iter_from(begin_number).ok_or(Error::MissingData)?; for (_, last_block) in set_changes { - let header = blockchain.header(BlockId::Number(*last_block))?.expect( - "header number comes from previously applied set changes; must exist in db; qed.", + let hash = blockchain.hash(*last_block)?.expect( + "header number comes from previously applied set changes; corresponding hash must exist in db; qed.", + ); + + let header = blockchain.header(hash)?.expect( + "header for given hash obtained successfully from db exists in db; qed.", ); // the last block in a set is the one that triggers a change to the next set, diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index b5f8b6b73bce9..f24c91ab2ffab 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -330,7 +330,15 @@ where let mut blocks = Vec::new(); let mut total_size: usize = 0; - while let Some(header) = self.client.header(block_id).unwrap_or_default() { + + let client_header_from_block_id = |block_id: BlockId| -> Result, HandleRequestError> { + if let Some(hash) = self.client.block_hash_from_id(&block_id)? 
{ + return self.client.header(hash).map_err(Into::into) + } + Ok(None) + }; + + while let Some(header) = client_header_from_block_id(block_id).unwrap_or_default() { let number = *header.number(); let hash = header.hash(); let parent_hash = *header.parent_hash(); From ce678ebe2f527d7de2ec7517f5d86e4ef0f1b893 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 8 Dec 2022 16:16:42 +0100 Subject: [PATCH 03/11] fmt --- .../basic-authorship/src/basic_authorship.rs | 65 ++++++------------- .../consensus/manual-seal/src/seal_block.rs | 5 +- client/db/src/lib.rs | 45 +++++++------ client/finality-grandpa/src/warp_proof.rs | 6 +- .../network/sync/src/block_request_handler.rs | 13 ++-- client/network/test/src/lib.rs | 8 +-- client/rpc/src/chain/mod.rs | 4 +- client/service/src/client/client.rs | 5 +- 8 files changed, 63 insertions(+), 88 deletions(-) diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 6fb17971f142e..159b11aa15dec 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -614,13 +614,9 @@ mod tests { block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(0u64)).expect("there should be header"), + ))); let mut proposer_factory = ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); @@ -707,13 +703,9 @@ mod tests { block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(0u64)).expect("there should be header"), + ))); let mut proposer_factory = ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); @@ -807,26 +799,18 @@ mod tests { block }; - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(0u64)).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), 7); // let's create one block and import it let block = propose_block(&client, 0, 2, 7); block_on(client.import(BlockOrigin::Own, block)).unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(1)) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(1)).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), 5); // now let's make sure that we can still make some progress @@ -845,9 +829,8 @@ mod tests { spawner.clone(), client.clone(), ); - let genesis_header = client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"); + let genesis_header = + client.expect_header(BlockId::Number(0u64)).expect("there should be header"); let extrinsics_num = 5; let extrinsics = std::iter::once( @@ -957,13 +940,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"), - )), - ); 
+ block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(0u64)).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3); let mut proposer_factory = @@ -1019,13 +998,9 @@ mod tests { ) .unwrap(); - block_on( - txpool.maintain(chain_event( - client - .expect_header(BlockId::Number(0u64)) - .expect("there should be header"), - )), - ); + block_on(txpool.maintain(chain_event( + client.expect_header(BlockId::Number(0u64)).expect("there should be header"), + ))); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 2); let mut proposer_factory = diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index d7289bdbe76f2..2cafdae503dce 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -99,9 +99,8 @@ pub async fn seal_block( // use the parent_hash supplied via `EngineCommand` // or fetch the best_block. let parent = match parent_hash { - Some(hash) => client - .header(hash)? - .ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, + Some(hash) => + client.header(hash)?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))?, None => select_chain.best_chain().await?, }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 14e4304198c3f..7226d090f7016 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -529,8 +529,12 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha if let Some(result) = cache.get_refresh(&hash) { return Ok(result.clone()) } - let header = - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::::Hash(hash))?; + let header = utils::read_header( + &*self.db, + columns::KEY_LOOKUP, + columns::HEADER, + BlockId::::Hash(hash), + )?; cache_header(&mut cache, hash, header.clone()); Ok(header) } @@ -565,8 +569,13 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } fn hash(&self, number: NumberFor) -> ClientResult> { - Ok(utils::read_header::(&*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Number(number))? - .map(|header| header.hash())) + Ok(utils::read_header::( + &*self.db, + columns::KEY_LOOKUP, + columns::HEADER, + BlockId::Number(number), + )? + .map(|header| header.hash())) } } @@ -1388,8 +1397,7 @@ impl Backend { .highest_leaf() .map(|(n, _)| n) .unwrap_or(Zero::zero()); - let existing_header = - number <= highest_leaf && self.blockchain.header(hash)?.is_some(); + let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1609,10 +1617,7 @@ impl Backend { ); } } else if number > best_num + One::one() && - number > One::one() && self - .blockchain - .header(parent_hash)? - .is_none() + number > One::one() && self.blockchain.header(parent_hash)?.is_none() { let gap = (best_num + One::one(), number - One::one()); transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); @@ -1634,10 +1639,9 @@ impl Backend { }; if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client_api::blockchain::HeaderBackend::header( - &self.blockchain, - set_head, - )? { + if let Some(header) = + sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
+ { let number = header.number(); let hash = header.hash(); @@ -2133,13 +2137,12 @@ impl sc_client_api::backend::Backend for Backend { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); - let removed = - self.blockchain.header(hash_to_revert)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block header not found.", - hash_to_revert, - )) - })?; + let removed = self.blockchain.header(hash_to_revert)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block header not found.", + hash_to_revert, + )) + })?; let removed_hash = removed.hash(); let prev_number = number_to_revert.saturating_sub(One::one()); diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index c21684d5baf96..be53725183879 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -120,9 +120,9 @@ impl WarpSyncProof { "header number comes from previously applied set changes; corresponding hash must exist in db; qed.", ); - let header = blockchain.header(hash)?.expect( - "header for given hash obtained successfully from db exists in db; qed.", - ); + let header = blockchain + .header(hash)? + .expect("header for given hash obtained successfully from db exists in db; qed."); // the last block in a set is the one that triggers a change to the next set, // therefore the block must have a digest that signals the authority set change diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index f24c91ab2ffab..84755453fbea0 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -331,12 +331,13 @@ where let mut total_size: usize = 0; - let client_header_from_block_id = |block_id: BlockId| -> Result, HandleRequestError> { - if let Some(hash) = self.client.block_hash_from_id(&block_id)? { - return self.client.header(hash).map_err(Into::into) - } - Ok(None) - }; + let client_header_from_block_id = + |block_id: BlockId| -> Result, HandleRequestError> { + if let Some(hash) = self.client.block_hash_from_id(&block_id)? 
{ + return self.client.header(hash).map_err(Into::into) + } + Ok(None) + }; while let Some(header) = client_header_from_block_id(block_id).unwrap_or_default() { let number = *header.number(); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 43b36e361e4ee..2bd05f63e4ea7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -171,13 +171,13 @@ impl PeersClient { pub fn has_state_at(&self, block: &BlockId) -> bool { let (number, hash) = match *block { BlockId::Hash(h) => match self.as_client().number(h) { - Ok(Some(n)) => (n,h), + Ok(Some(n)) => (n, h), _ => return false, - } + }, BlockId::Number(n) => match self.as_client().hash(n) { - Ok(Some(h)) => (n,h), + Ok(Some(h)) => (n, h), _ => return false, - } + }, }; self.backend.have_state_at(hash, number) } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index d5c8ce5550112..16e69c0f6324f 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -80,9 +80,7 @@ where )) })?; let block_num = >::from(block_num); - self - .client() - .hash(block_num).map_err(client_err) + self.client().hash(block_num).map_err(client_err) }, } } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e81e1aeb52ae9..bf457adf5e7e3 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1945,13 +1945,12 @@ where fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { Ok(match self.backend.blockchain().block_hash_from_id(id)? { - Some(hash) => { + Some(hash) => match (self.header(hash)?, self.body(hash)?, self.justifications(hash)?) { (Some(header), Some(extrinsics), justifications) => Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, - } - }, + }, None => None, }) } From 7f62b7ff702537ff9b8eccc0bdc89e3a1304b270 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 8 Dec 2022 16:37:51 +0100 Subject: [PATCH 04/11] fix --- client/merkle-mountain-range/src/test_utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/merkle-mountain-range/src/test_utils.rs b/client/merkle-mountain-range/src/test_utils.rs index f345fb52578ab..1bc7a9bd5bbac 100644 --- a/client/merkle-mountain-range/src/test_utils.rs +++ b/client/merkle-mountain-range/src/test_utils.rs @@ -226,8 +226,8 @@ impl HeaderMetadata for MockClient { } impl HeaderBackend for MockClient { - fn header(&self, id: BlockId) -> sc_client_api::blockchain::Result> { - self.client.lock().header(&id) + fn header(&self, hash: Hash) -> sc_client_api::blockchain::Result> { + self.client.lock().header(hash) } fn info(&self) -> Info { From 1ed915032b331d9617ee39f3ca364ebf8e24d7f8 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 9 Dec 2022 14:52:00 +0100 Subject: [PATCH 05/11] doc fixed --- client/basic-authorship/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 4a26ebd9df970..e7f0d4496edeb 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -50,7 +50,7 @@ //! //! // From this factory, we create a `Proposer`. //! let proposer = proposer_factory.init( -//! &client.header(&BlockId::number(0)).unwrap().unwrap(), +//! &client.header(client.chain_info().genesis_hash).unwrap().unwrap(), //! ); //! //! 
// The proposer is created asynchronously. From d0aaea97a64227cfc1f7b9780435a308492ec252 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Mon, 12 Dec 2022 19:24:30 +0000 Subject: [PATCH 06/11] ".git/.scripts/fmt.sh" --- client/network/src/protocol.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 91249b9973db9..1f0ec9c0d120d 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -46,9 +46,7 @@ use sc_network_common::{ utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_runtime::{ - traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, -}; +use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{HashMap, HashSet, VecDeque}, io, iter, From ae66534b52cde976798b25dc36bc8e1c2fda50c9 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 15 Dec 2022 11:33:02 +0100 Subject: [PATCH 07/11] BlockId removal: refactor: HeaderBackend::expect_header It changes the arguments of `HeaderBackend::expect_header` method from: `BlockId` to: `Block::Hash` --- .../basic-authorship/src/basic_authorship.rs | 75 ++++++++++++------- client/beefy/src/lib.rs | 2 +- client/beefy/src/tests.rs | 3 +- client/beefy/src/worker.rs | 25 +++++-- client/consensus/aura/src/lib.rs | 2 +- client/db/src/lib.rs | 6 +- client/finality-grandpa/src/finality_proof.rs | 3 +- client/finality-grandpa/src/tests.rs | 9 ++- client/finality-grandpa/src/warp_proof.rs | 7 +- client/service/test/src/client/mod.rs | 13 +++- primitives/blockchain/src/backend.rs | 6 +- 11 files changed, 100 insertions(+), 51 deletions(-) diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 159b11aa15dec..c39d07a14f0f1 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -614,16 +614,20 @@ mod tests { block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) .unwrap(); - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(0u64)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client + .expect_header(client.info().genesis_hash) + .expect("there should be header"), + )), + ); let mut proposer_factory = ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - &client.expect_header(BlockId::number(0)).unwrap(), + &client.expect_header(client.info().genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -667,7 +671,7 @@ mod tests { let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( - &client.expect_header(BlockId::number(0)).unwrap(), + &client.expect_header(client.info().genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); if !value.0 { @@ -703,9 +707,13 @@ mod tests { block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(0u64)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client + .expect_header(client.info().genesis_hash) + .expect("there should be header"), + )), + ); let mut 
proposer_factory = ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); @@ -779,8 +787,9 @@ mod tests { number, expected_block_extrinsics, expected_pool_transactions| { + let hash = client.expect_block_hash_from_id(&BlockId::Number(number)).unwrap(); let proposer = proposer_factory.init_with_now( - &client.expect_header(BlockId::number(number)).unwrap(), + &client.expect_header(hash).unwrap(), Box::new(move || time::Instant::now()), ); @@ -799,18 +808,25 @@ mod tests { block }; - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(0u64)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client + .expect_header(client.info().genesis_hash) + .expect("there should be header"), + )), + ); assert_eq!(txpool.ready().count(), 7); // let's create one block and import it let block = propose_block(&client, 0, 2, 7); + let hashof1 = block.hash(); block_on(client.import(BlockOrigin::Own, block)).unwrap(); - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(1)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client.expect_header(hashof1).expect("there should be header"), + )), + ); assert_eq!(txpool.ready().count(), 5); // now let's make sure that we can still make some progress @@ -829,8 +845,9 @@ mod tests { spawner.clone(), client.clone(), ); - let genesis_header = - client.expect_header(BlockId::Number(0u64)).expect("there should be header"); + let genesis_header = client + .expect_header(client.info().genesis_hash) + .expect("there should be header"); let extrinsics_num = 5; let extrinsics = std::iter::once( @@ -940,9 +957,13 @@ mod tests { ) .unwrap(); - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(0u64)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client + .expect_header(client.info().genesis_hash) + .expect("there should be header"), + )), + ); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3); let mut proposer_factory = @@ -950,7 +971,7 @@ mod tests { let cell = Mutex::new(time::Instant::now()); let proposer = proposer_factory.init_with_now( - &client.expect_header(BlockId::number(0)).unwrap(), + &client.expect_header(client.info().genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); let old = *value; @@ -998,9 +1019,13 @@ mod tests { ) .unwrap(); - block_on(txpool.maintain(chain_event( - client.expect_header(BlockId::Number(0u64)).expect("there should be header"), - ))); + block_on( + txpool.maintain(chain_event( + client + .expect_header(client.info().genesis_hash) + .expect("there should be header"), + )), + ); assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 2); let mut proposer_factory = @@ -1010,7 +1035,7 @@ mod tests { let cell = Arc::new(Mutex::new((0, time::Instant::now()))); let cell2 = cell.clone(); let proposer = proposer_factory.init_with_now( - &client.expect_header(BlockId::number(0)).unwrap(), + &client.expect_header(client.info().genesis_hash).unwrap(), Box::new(move || { let mut value = cell.lock(); let (called, old) = *value; diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index a057a9fdc597d..35f7cac55a964 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -412,7 +412,7 @@ where })?; // Move up the chain. 
- header = blockchain.expect_header(BlockId::Hash(parent_hash))?; + header = blockchain.expect_header(parent_hash)?; }; aux_schema::write_current_version(backend)?; diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index f6ab0dd1020f1..48201f7f33e52 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -448,9 +448,8 @@ fn wait_for_best_beefy_blocks( wait_for.push(Box::pin(stream.take(len).for_each(move |best_beefy_hash| { let expected = expected.next(); async move { - let block_id = BlockId::hash(best_beefy_hash); let header = - net.lock().peer(i).client().as_client().expect_header(block_id).unwrap(); + net.lock().peer(i).client().as_client().expect_header(best_beefy_hash).unwrap(); let best_beefy = *header.number(); assert_eq!(expected, Some(best_beefy).as_ref()); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index bba3563a8f70e..2370bf188413d 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -465,7 +465,7 @@ where .map(|hash| { backend .blockchain() - .expect_header(BlockId::hash(*hash)) + .expect_header(*hash) .expect("just finalized block should be available; qed.") }) .chain(std::iter::once(header.clone())) @@ -718,15 +718,26 @@ where let target_header = if target_number == self.best_grandpa_block() { self.persisted_state.best_grandpa_block_header.clone() } else { - self.backend + let hash = self.backend .blockchain() - .expect_header(BlockId::Number(target_number)) + .expect_block_hash_from_id(&BlockId::Number(target_number)) .map_err(|err| { let err_msg = format!( - "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", + "Couldn't get hash for block #{:?} (error: {:?}), skipping vote..", target_number, err ); Error::Backend(err_msg) + })?; + + self.backend + .blockchain() + .expect_header(hash) + .map_err(|err| { + let err_msg = format!( + "Couldn't get header for block #{:?} ({:?}) (error: {:?}), skipping vote..", + target_number, hash, err + ); + Error::Backend(err_msg) })? 
}; let target_hash = target_header.hash(); @@ -1051,8 +1062,10 @@ pub(crate) mod tests { "/beefy/justifs/1".into(), known_peers, ); - let at = BlockId::number(Zero::zero()); - let genesis_header = backend.blockchain().expect_header(at).unwrap(); + let genesis_header = backend + .blockchain() + .expect_header(backend.blockchain().info().genesis_hash) + .unwrap(); let persisted_state = PersistedState::checked_new( genesis_header, Zero::zero(), diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 3d9d398262a15..ed5fa29077881 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -969,7 +969,7 @@ mod tests { compatibility_mode: Default::default(), }; - let head = client.expect_header(BlockId::Number(0)).unwrap(); + let head = client.expect_header(client.info().genesis_hash).unwrap(); let res = runtime .block_on(worker.on_slot(SlotInfo { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 10bc8fe48822f..cd0be5f1aeba1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1372,7 +1372,7 @@ impl Backend { }; for (block_hash, justification) in operation.finalized_blocks { - let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; + let block_header = self.blockchain.expect_header(block_hash)?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, block_hash, @@ -2010,7 +2010,7 @@ impl sc_client_api::backend::Backend for Backend { justification: Option, ) -> ClientResult<()> { let mut transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; + let header = self.blockchain.expect_header(hash)?; let mut displaced = None; let m = self.finalize_block_with_transaction( @@ -2032,7 +2032,7 @@ impl sc_client_api::backend::Backend for Backend { justification: Justification, ) -> ClientResult<()> { let mut transaction: Transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(hash))?; + let header = self.blockchain.expect_header(hash)?; let number = *header.number(); // Check if the block is finalized first. 
diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 43ed0ed31993e..416e71478ab4d 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -222,7 +222,8 @@ where if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { break } - headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); + let hash = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(current))?; + headers.push(backend.blockchain().expect_header(hash)?); current += One::one(); } headers diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 2d9434cd2132f..fcb075ec7fa06 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1564,9 +1564,12 @@ fn justification_with_equivocation() { net.peer(0).push_blocks(3, false); let client = net.peer(0).client().as_client().clone(); - let block1 = client.expect_header(BlockId::Number(1)).unwrap(); - let block2 = client.expect_header(BlockId::Number(2)).unwrap(); - let block3 = client.expect_header(BlockId::Number(3)).unwrap(); + let hashof1 = client.expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); + let hashof2 = client.expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); + let hashof3 = client.expect_block_hash_from_id(&BlockId::Number(3)).unwrap(); + let block1 = client.expect_header(hashof1).unwrap(); + let block2 = client.expect_header(hashof2).unwrap(); + let block3 = client.expect_header(hashof3).unwrap(); let set_id = 0; let justification = { diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index be53725183879..c65a705d3e844 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -116,13 +116,12 @@ impl WarpSyncProof { let set_changes = set_changes.iter_from(begin_number).ok_or(Error::MissingData)?; for (_, last_block) in set_changes { - let hash = blockchain.hash(*last_block)?.expect( - "header number comes from previously applied set changes; corresponding hash must exist in db; qed.", - ); + let hash = blockchain.block_hash_from_id(&BlockId::Number(*last_block))? + .expect("header number comes from previously applied set changes; corresponding hash must exist in db; qed."); let header = blockchain .header(hash)? 
- .expect("header for given hash obtained successfully from db exists in db; qed."); + .expect("header hash obtained from header number exists in db; corresponding header must exist in db too; qed."); // the last block in a set is the one that triggers a change to the next set, // therefore the block must have a digest that signals the authority set change diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index edca7e48aa5c7..2449531705ad1 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1141,13 +1141,22 @@ fn get_block_by_bad_block_hash_returns_none() { } #[test] -fn get_header_by_block_number_doesnt_panic() { +fn expect_block_hash_by_block_number_doesnt_panic() { let client = substrate_test_runtime_client::new(); // backend uses u32 for block numbers, make sure we don't panic when // trying to convert let id = BlockId::::Number(72340207214430721); - client.expect_header(id).expect_err("invalid block number overflows u32"); + client.block_hash_from_id(&id).expect_err("invalid block number overflows u32"); +} + +#[test] +fn get_hash_by_block_number_doesnt_panic() { + let client = substrate_test_runtime_client::new(); + + // backend uses u32 for block numbers, make sure we don't panic when + // trying to convert + client.hash(72340207214430721).expect_err("invalid block number overflows u32"); } #[test] diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 94d696c58392f..6199fd25d0fca 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -63,9 +63,9 @@ pub trait HeaderBackend: Send + Sync { } /// Get block header. Returns `UnknownBlock` error if block is not found. - fn expect_header(&self, id: BlockId) -> Result { - self.header(self.expect_block_hash_from_id(&id)?)? - .ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) + fn expect_header(&self, hash: Block::Hash) -> Result { + self.header(hash)? + .ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash))) } /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is From 9229dd2e286fc4204faa3156ed0efdc19b8c59e6 Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Thu, 15 Dec 2022 11:53:57 +0000 Subject: [PATCH 08/11] ".git/.scripts/fmt.sh" --- client/beefy/src/worker.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 2370bf188413d..c32b34a593530 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -718,7 +718,8 @@ where let target_header = if target_number == self.best_grandpa_block() { self.persisted_state.best_grandpa_block_header.clone() } else { - let hash = self.backend + let hash = self + .backend .blockchain() .expect_block_hash_from_id(&BlockId::Number(target_number)) .map_err(|err| { @@ -729,16 +730,13 @@ where Error::Backend(err_msg) })?; - self.backend - .blockchain() - .expect_header(hash) - .map_err(|err| { - let err_msg = format!( - "Couldn't get header for block #{:?} ({:?}) (error: {:?}), skipping vote..", - target_number, hash, err - ); - Error::Backend(err_msg) - })? + self.backend.blockchain().expect_header(hash).map_err(|err| { + let err_msg = format!( + "Couldn't get header for block #{:?} ({:?}) (error: {:?}), skipping vote..", + target_number, hash, err + ); + Error::Backend(err_msg) + })? 
}; let target_hash = target_header.hash(); From 6d22a6cbfdab066bf027ee707628f1fdce15a20e Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 19 Dec 2022 11:54:22 +0100 Subject: [PATCH 09/11] readme updated --- client/basic-authorship/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/basic-authorship/README.md b/client/basic-authorship/README.md index d29ce258e5134..f2f160b6e2a97 100644 --- a/client/basic-authorship/README.md +++ b/client/basic-authorship/README.md @@ -8,7 +8,7 @@ let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone(), // From this factory, we create a `Proposer`. let proposer = proposer_factory.init( - &client.header(&BlockId::number(0)).unwrap().unwrap(), + &client.header(client.chain_info().genesis_hash).unwrap().unwrap(), ); // The proposer is created asynchronously. From a6b5c7860281afbc2d9665f51ea15d4d65c4b2ec Mon Sep 17 00:00:00 2001 From: command-bot <> Date: Tue, 20 Dec 2022 08:48:49 +0000 Subject: [PATCH 10/11] ".git/.scripts/fmt.sh" --- client/service/src/builder.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 85ccd11fd6482..0a26c00485e2f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -70,9 +70,7 @@ use sp_consensus::block_validation::{ }; use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::{ - traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}, -}; +use sp_runtime::traits::{Block as BlockT, BlockIdTo, NumberFor, Zero}; use std::{str::FromStr, sync::Arc, time::SystemTime}; /// Full client type. From 9f35ae51a03b60788a6cdfc1fc548679725c3db2 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 20 Dec 2022 09:53:15 +0100 Subject: [PATCH 11/11] fix --- client/rpc-spec-v2/src/chain_head/chain_head.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/rpc-spec-v2/src/chain_head/chain_head.rs b/client/rpc-spec-v2/src/chain_head/chain_head.rs index c55625e99cd45..c0ba9e165dd04 100644 --- a/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -610,7 +610,7 @@ where } self.client - .header(BlockId::Hash(hash)) + .header(hash) .map(|opt_header| opt_header.map(|h| format!("0x{:?}", HexDisplay::from(&h.encode())))) .map_err(ChainHeadRpcError::FetchBlockHeader) .map_err(Into::into)
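
Note for reviewers (illustrative sketch, not part of the patch series): the snippet below shows how a call site that previously looked up headers via `BlockId` adapts to the hash-based `HeaderBackend` API refactored in these commits. The helper names `header_by_number` and `best_header` are hypothetical and exist only for illustration; `HeaderBackend::hash`, `HeaderBackend::header`, `HeaderBackend::expect_header` and `Info::best_hash` are the items whose post-refactor signatures the sketch assumes.

use sp_blockchain::{HeaderBackend, Result};
use sp_runtime::traits::{Block as BlockT, NumberFor};

// Old pattern: `client.header(BlockId::Number(n))?`.
// New pattern: resolve the canonical hash first, then query the header by hash.
fn header_by_number<Block: BlockT, Client: HeaderBackend<Block>>(
    client: &Client,
    number: NumberFor<Block>,
) -> Result<Option<Block::Header>> {
    match client.hash(number)? {
        Some(hash) => client.header(hash),
        None => Ok(None),
    }
}

// Old pattern: `client.expect_header(BlockId::Hash(client.info().best_hash))?`.
// New pattern: pass the hash directly, no `BlockId` round-trip needed.
fn best_header<Block: BlockT, Client: HeaderBackend<Block>>(
    client: &Client,
) -> Result<Block::Header> {
    let best_hash = client.info().best_hash;
    client.expect_header(best_hash)
}

Resolving a block number to its canonical hash before querying keeps fork handling explicit at the call site, which is the same pattern the diffs above adopt (e.g. `expect_block_hash_from_id` followed by `expect_header`).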