
BlockId removal: refactor: HeaderBackend::header (#6418)
* BlockId removal: refactor: HeaderBackend::header

It changes the argument of the following methods from `BlockId<Block>` to `Block::Hash` (a brief before/after sketch follows the file summary below):
- `HeaderBackend::header`
- `Client::header`

This PR is part of the BlockId::Number refactoring analysis (paritytech/substrate#11292)

* missed fixes

* BlockId removal: refactor: HeaderBackend::expect_header

It changes the argument of the `HeaderBackend::expect_header` method from `BlockId<Block>` to `Block::Hash`

* update lockfile for {"substrate"}

* misspelling fixed

Co-authored-by: parity-processbot <>
michalkucharczyk authored Dec 20, 2022
1 parent 0c659e5 commit 86c134e
Showing 10 changed files with 228 additions and 237 deletions.
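The core of the change is the `HeaderBackend::header` signature: callers now pass a `Block::Hash` directly instead of wrapping it in a `BlockId`. A minimal before/after sketch of a call site, assuming the post-refactor `sp_blockchain` API (generic bounds abbreviated; `parent_of` is an illustrative helper, not part of this PR):

```rust
use sp_blockchain::HeaderBackend;
use sp_runtime::traits::{Block as BlockT, Header as _};

// Before: client.header(BlockId::Hash(hash))
// After:  client.header(hash)
fn parent_of<B, C>(client: &C, hash: B::Hash) -> sp_blockchain::Result<Option<B::Hash>>
where
    B: BlockT,
    C: HeaderBackend<B>,
{
    // `header` takes the hash directly and still returns Result<Option<Header>>.
    Ok(client.header(hash)?.map(|header| header.parent_hash().clone()))
}
```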
361 changes: 180 additions & 181 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion cli/src/command.rs
@@ -541,7 +541,7 @@ pub fn run() -> Result<()> {
ensure_dev(chain_spec).map_err(Error::Other)?;
runner.sync_run(|mut config| {
let (client, _, _, _) = service::new_chain_ops(&mut config, None)?;
let header = client.header(BlockId::Number(0_u32.into())).unwrap().unwrap();
let header = client.header(client.info().genesis_hash).unwrap().unwrap();
let inherent_data = benchmark_inherent_data(header)
.map_err(|e| format!("generating inherent data: {:?}", e))?;
let remark_builder = RemarkBuilder::new(client.clone());
4 changes: 2 additions & 2 deletions node/client/src/lib.rs
@@ -560,12 +560,12 @@ impl sc_client_api::StorageProvider<Block, crate::FullBackend> for Client {
}

impl sp_blockchain::HeaderBackend<Block> for Client {
fn header(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Header>> {
fn header(&self, hash: Hash) -> sp_blockchain::Result<Option<Header>> {
with_client! {
self,
client,
{
client.header(&id)
client.header(hash)
}
}
}
9 changes: 3 additions & 6 deletions node/core/chain-api/src/lib.rs
@@ -41,7 +41,7 @@ use polkadot_node_subsystem::{
messages::ChainApiMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem,
SubsystemError, SubsystemResult,
};
use polkadot_primitives::v2::{Block, BlockId};
use polkadot_primitives::v2::Block;

mod metrics;
use self::metrics::Metrics;
@@ -99,10 +99,7 @@ where
},
ChainApiMessage::BlockHeader(hash, response_channel) => {
let _timer = subsystem.metrics.time_block_header();
let result = subsystem
.client
.header(BlockId::Hash(hash))
.map_err(|e| e.to_string().into());
let result = subsystem.client.header(hash).map_err(|e| e.to_string().into());
subsystem.metrics.on_request(result.is_ok());
let _ = response_channel.send(result);
},
@@ -134,7 +131,7 @@ where
let mut hash = hash;

let next_parent = core::iter::from_fn(|| {
let maybe_header = subsystem.client.header(BlockId::Hash(hash));
let maybe_header = subsystem.client.header(hash);
match maybe_header {
// propagate the error
Err(e) => {
18 changes: 7 additions & 11 deletions node/core/chain-api/src/tests.rs
@@ -117,13 +117,11 @@ impl HeaderBackend<Block> for TestClient {
fn hash(&self, number: BlockNumber) -> sp_blockchain::Result<Option<Hash>> {
Ok(self.finalized_blocks.get(&number).copied())
}
fn header(&self, id: BlockId) -> sp_blockchain::Result<Option<Header>> {
match id {
// for error path testing
BlockId::Hash(hash) if hash.is_zero() =>
Err(sp_blockchain::Error::Backend("Zero hashes are illegal!".into())),
BlockId::Hash(hash) => Ok(self.headers.get(&hash).cloned()),
_ => unreachable!(),
fn header(&self, hash: Hash) -> sp_blockchain::Result<Option<Header>> {
if hash.is_zero() {
Err(sp_blockchain::Error::Backend("Zero hashes are illegal!".into()))
} else {
Ok(self.headers.get(&hash).cloned())
}
}
fn status(&self, _id: BlockId) -> sp_blockchain::Result<sp_blockchain::BlockStatus> {
@@ -203,10 +201,8 @@ fn request_block_header() {
test_harness(|client, mut sender| {
async move {
const NOT_HERE: Hash = Hash::repeat_byte(0x5);
let test_cases = [
(TWO, client.header(BlockId::Hash(TWO)).unwrap()),
(NOT_HERE, client.header(BlockId::Hash(NOT_HERE)).unwrap()),
];
let test_cases =
[(TWO, client.header(TWO).unwrap()), (NOT_HERE, client.header(NOT_HERE).unwrap())];
for (hash, expected) in &test_cases {
let (tx, rx) = oneshot::channel();

1 change: 0 additions & 1 deletion node/core/parachains-inherent/Cargo.toml
@@ -15,4 +15,3 @@ polkadot-overseer = { path = "../../overseer" }
polkadot-primitives = { path = "../../../primitives" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
3 changes: 1 addition & 2 deletions node/core/parachains-inherent/src/lib.rs
@@ -29,7 +29,6 @@ use polkadot_node_subsystem::{
errors::SubsystemError, messages::ProvisionerMessage, overseer::Handle,
};
use polkadot_primitives::v2::{Block, Hash, InherentData as ParachainsInherentData};
use sp_runtime::generic::BlockId;
use std::{sync::Arc, time};

pub(crate) const LOG_TARGET: &str = "parachain::parachains-inherent";
@@ -87,7 +86,7 @@ impl<C: sp_blockchain::HeaderBackend<Block>> ParachainsInherentDataProvider<C> {

let mut timeout = futures_timer::Delay::new(PROVISIONER_TIMEOUT).fuse();

let parent_header = match client.header(BlockId::Hash(parent)) {
let parent_header = match client.header(parent) {
Ok(Some(h)) => h,
Ok(None) => return Err(Error::ParentHeaderNotFound(parent)),
Err(err) => return Err(Error::Blockchain(err)),
47 changes: 25 additions & 22 deletions node/service/src/grandpa_support.rs
@@ -224,76 +224,79 @@ mod tests {
TestClientBuilder, TestClientBuilderExt,
};
use sp_blockchain::HeaderBackend;
use sp_runtime::{generic::BlockId, traits::Header};
use sp_runtime::traits::Header;
use std::sync::Arc;

#[test]
fn grandpa_pause_voting_rule_works() {
let _ = env_logger::try_init();

let client = Arc::new(TestClientBuilder::new().build());
let mut hashes = vec![];
hashes.push(client.info().genesis_hash);

let mut push_blocks = {
let mut client = client.clone();

move |n| {
move |hashes: &mut Vec<_>, n| {
for _ in 0..n {
let block = client.init_polkadot_block_builder().build().unwrap().block;
hashes.push(block.header.hash());
futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
}
}
};

let get_header = {
let client = client.clone();
move |n| client.header(&BlockId::Number(n)).unwrap().unwrap()
move |n| client.expect_header(n).unwrap()
};

// the rule should filter all votes after block #20
// is finalized until block #50 is imported.
let voting_rule = super::PauseAfterBlockFor(20, 30);

// add 10 blocks
push_blocks(10);
push_blocks(&mut hashes, 10);
assert_eq!(client.info().best_number, 10);

// we have not reached the pause block
// therefore nothing should be restricted
assert_eq!(
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&get_header(0),
&get_header(10),
&get_header(10)
&get_header(hashes[0]),
&get_header(hashes[10]),
&get_header(hashes[10])
)),
None,
);

// add 15 more blocks
// best block: #25
push_blocks(15);
push_blocks(&mut hashes, 15);

// we are targeting the pause block,
// the vote should not be restricted
assert_eq!(
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&get_header(10),
&get_header(20),
&get_header(20)
&get_header(hashes[10]),
&get_header(hashes[20]),
&get_header(hashes[20])
)),
None,
);

// we are past the pause block, votes should
// be limited to the pause block.
let pause_block = get_header(20);
let pause_block = get_header(hashes[20]);
assert_eq!(
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&get_header(10),
&get_header(21),
&get_header(21)
&get_header(hashes[10]),
&get_header(hashes[21]),
&get_header(hashes[21])
)),
Some((pause_block.hash(), *pause_block.number())),
);
@@ -304,24 +307,24 @@
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&pause_block, // #20
&get_header(21),
&get_header(21),
&get_header(hashes[21]),
&get_header(hashes[21]),
)),
Some((pause_block.hash(), *pause_block.number())),
);

// add 30 more blocks
// best block: #55
push_blocks(30);
push_blocks(&mut hashes, 30);

// we're at the last block of the pause, this block
// should still be considered in the pause period
assert_eq!(
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&pause_block, // #20
&get_header(50),
&get_header(50),
&get_header(hashes[50]),
&get_header(hashes[50]),
)),
Some((pause_block.hash(), *pause_block.number())),
);
@@ -331,8 +334,8 @@
futures::executor::block_on(voting_rule.restrict_vote(
client.clone(),
&pause_block, // #20
&get_header(51),
&get_header(51),
&get_header(hashes[51]),
&get_header(hashes[51]),
)),
None,
);
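As the updated GRANDPA pause test above shows, callers that previously fetched headers by number via `BlockId::Number(n)` now resolve the number to a hash first; the test keeps its own `hashes` vector, and `HeaderBackend::hash(number)` is another route. A rough sketch of that pattern, assuming the post-refactor API (`header_by_number` is an illustrative helper, not part of this PR):

```rust
use sp_blockchain::HeaderBackend;
use sp_runtime::traits::{Block as BlockT, NumberFor};

// Illustrative helper: look up a header by number under the hash-only API.
fn header_by_number<B, C>(
    client: &C,
    number: NumberFor<B>,
) -> sp_blockchain::Result<Option<B::Header>>
where
    B: BlockT,
    C: HeaderBackend<B>,
{
    match client.hash(number)? {
        Some(hash) => client.header(hash),
        None => Ok(None),
    }
}
```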
7 changes: 2 additions & 5 deletions node/service/src/lib.rs
@@ -161,10 +161,7 @@
&self,
hash: Block::Hash,
) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
<Self as sp_blockchain::HeaderBackend<Block>>::header(
self,
generic::BlockId::<Block>::Hash(hash),
)
<Self as sp_blockchain::HeaderBackend<Block>>::header(self, hash)
}
fn number(
&self,
@@ -701,7 +698,7 @@
return None
};

let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;
let parent_hash = client.header(hash).ok()??.parent_hash;

Some(BlockInfo { hash, parent_hash, number })
})
13 changes: 7 additions & 6 deletions node/test/client/src/block_builder.rs
@@ -24,7 +24,7 @@ use sp_consensus_babe::{
digests::{PreDigest, SecondaryPlainPreDigest},
BABE_ENGINE_ID,
};
use sp_runtime::{generic::BlockId, Digest, DigestItem};
use sp_runtime::{generic::BlockId, traits::Block as BlockT, Digest, DigestItem};
use sp_state_machine::BasicExternalities;

/// An extension for the test client to initialize a Polkadot specific block builder.
@@ -42,20 +42,21 @@
/// which should be the parent block of the block that is being build.
fn init_polkadot_block_builder_at(
&self,
at: &BlockId<Block>,
hash: <Block as BlockT>::Hash,
) -> sc_block_builder::BlockBuilder<Block, Client, FullBackend>;
}

impl InitPolkadotBlockBuilder for Client {
fn init_polkadot_block_builder(&self) -> BlockBuilder<Block, Client, FullBackend> {
let chain_info = self.chain_info();
self.init_polkadot_block_builder_at(&BlockId::Hash(chain_info.best_hash))
self.init_polkadot_block_builder_at(chain_info.best_hash)
}

fn init_polkadot_block_builder_at(
&self,
at: &BlockId<Block>,
hash: <Block as BlockT>::Hash,
) -> BlockBuilder<Block, Client, FullBackend> {
let at = BlockId::Hash(hash);
let last_timestamp =
self.runtime_api().get_last_timestamp(&at).expect("Get last timestamp");

@@ -87,7 +88,7 @@ impl InitPolkadotBlockBuilder for Client {
};

let mut block_builder = self
.new_block_at(at, digest, false)
.new_block_at(&at, digest, false)
.expect("Creates new block builder for test runtime");

let mut inherent_data = sp_inherents::InherentData::new();
Expand All @@ -97,7 +98,7 @@ impl InitPolkadotBlockBuilder for Client {
.expect("Put timestamp inherent data");

let parent_header = self
.header(at)
.header(hash)
.expect("Get the parent block header")
.expect("The target block header must exist");
