From 66bf4b2c9db2cdeab76413b0bb892b719253610a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 20:05:14 +0100 Subject: [PATCH 01/79] crate skeleton and type definitions --- Cargo.lock | 15 +++++ Cargo.toml | 1 + node/core/chain-selection/Cargo.toml | 19 ++++++ node/core/chain-selection/src/lib.rs | 95 ++++++++++++++++++++++++++++ 4 files changed, 130 insertions(+) create mode 100644 node/core/chain-selection/Cargo.toml create mode 100644 node/core/chain-selection/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2722e3cfafd0..e0cf6d5e1bfb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6093,6 +6093,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "polkadot-node-core-chain-selection" +version = "0.1.0" +dependencies = [ + "futures 0.3.15", + "kvdb", + "kvdb-memorydb", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sp-core", + "tracing", +] + [[package]] name = "polkadot-node-core-dispute-coordinator" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index be022f7b8771..ef5368e58e3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ members = [ "node/core/bitfield-signing", "node/core/candidate-validation", "node/core/chain-api", + "node/core/chain-selection", "node/core/dispute-coordinator", "node/core/dispute-participation", "node/core/parachains-inherent", diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml new file mode 100644 index 000000000000..afe711413dee --- /dev/null +++ b/node/core/chain-selection/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "polkadot-node-core-chain-selection" +description = "Chain Selection Subsystem" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.15" +tracing = "0.1.26" +polkadot-primitives = { path = "../../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } +kvdb = "0.9.0" + +[dev-dependencies] +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +kvdb-memorydb = "0.9.0" diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs new file mode 100644 index 000000000000..e10a130ff4cc --- /dev/null +++ b/node/core/chain-selection/src/lib.rs @@ -0,0 +1,95 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements the Chain Selection Subsystem. 
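//!
//! In brief (summarizing the types defined below): the subsystem keeps one `BlockEntry` per
//! unfinalized block, considers a block viable only if it is neither explicitly reverted nor
//! stagnant and has no unviable ancestor, and maintains the set of viable leaves together
//! with their weights for answering chain-selection queries.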
+ +use polkadot_primitives::v1::Hash; +use polkadot_subsystem::{ + Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, +}; + +type Weight = u64; +type Timestamp = u64; + +enum Approval { + // Approved + Approved, + // Unapproved but not stagnant + Unapproved, + // Unapproved and stagnant. + Stagnant, +} + +impl Approval { + fn is_stagnant(&self) -> bool { + match *self { + Approval::Stagnant => true, + _ => false, + } + } +} + +struct ViabilityCriteria { + // Whether this block has been explicitly reverted by one of its descendants. + explicitly_reverted: bool, + // `None` means approved. `Some` means unapproved. + approval: Approval, + earliest_non_viable_ancestor: Option, +} + +impl ViabilityCriteria { + fn is_viable(&self) -> bool { + self.earliest_non_viable_ancestor.is_none() + && !self.explicitly_reverted + && !self.approval.is_stagnant() + } +} + +struct LeafEntry { + weight: Weight, + block_hash: Hash, +} + +struct BlockEntry { + block_hash: Hash, + parent_hash: Hash, + children: Vec, + viability: ViabilityCriteria, + weight: Weight, +} + +enum BackendWriteOp { + WriteBlockEntry(Hash, BlockEntry), + DeleteBlockEntry(Hash), + WriteActiveLeaves(Vec), +} + +// An abstraction over backend for the logic of this subsystem. +trait Backend { + /// The error type of this backend, which is assumed to indicate a + /// fatal database error. + type Error: Into; + + /// Load a block entry from the DB. + fn load_block_entry(&self) -> Result; + /// Load the active-leaves set. + fn load_leaves(&self) -> Result, Self::Error>; + /// Load all stagnant lists up to and including the given unix timestamp. + fn load_stagnant_up_to(&self, up_to: Timestamp) -> Result)>, Self::Error>; + + /// Atomically write the list of operations, with later operations taking precedence over prior. + fn write(&self, ops: Vec) -> Result<(), Self::Error>; +} From dfb09e54d3194432addf6b3d2d4a28f1c1ef9f1e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 20:15:42 +0100 Subject: [PATCH 02/79] add ChainSelectionMessage --- node/network/bridge/src/tests.rs | 1 + node/overseer/src/lib.rs | 3 +++ node/subsystem/src/messages.rs | 29 +++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/node/network/bridge/src/tests.rs b/node/network/bridge/src/tests.rs index 2321b3bf5f80..698b8a1b2785 100644 --- a/node/network/bridge/src/tests.rs +++ b/node/network/bridge/src/tests.rs @@ -1252,6 +1252,7 @@ fn spread_event_to_subsystems_is_up_to_date() { AllMessages::GossipSupport(_) => unreachable!("Not interested in network events"), AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"), AllMessages::DisputeParticipation(_) => unreachable!("Not interetsed in network events"), + AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"), // Add variants here as needed, `{ cnt += 1; }` for those that need to be // notified, `unreachable!()` for those that should not. 
} diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 13461a060742..2ce51d2d7849 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -631,6 +631,7 @@ impl ChannelsOut { }, AllMessages::DisputeCoordinator(_) => Ok(()), AllMessages::DisputeParticipation(_) => Ok(()), + AllMessages::ChainSelection(_) => Ok(()), }; if res.is_err() { @@ -735,6 +736,7 @@ impl ChannelsOut { }, AllMessages::DisputeCoordinator(_) => Ok(()), AllMessages::DisputeParticipation(_) => Ok(()), + AllMessages::ChainSelection(_) => Ok(()), }; if res.is_err() { @@ -2068,6 +2070,7 @@ where }, AllMessages::DisputeCoordinator(_) => {} AllMessages::DisputeParticipation(_) => {} + AllMessages::ChainSelectionMessage(_) => {} } Ok(()) diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index f8ae3dead6ff..7d48908c5316 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -490,6 +490,32 @@ impl ChainApiMessage { } } +/// Chain selection subsystem messages +#[derive(Debug)] +pub enum ChainSelectionMessage { + /// Signal to the chain selection subsystem that a specific block has been approved. + Approved(Hash), + /// Request the leaves in descending order by score. + Leaves(oneshot::Sender>), + /// Request the best leaf containing the given block in its ancestry. Return `None` if + /// there is no such leaf. + BestLeafContaining(Hash, oneshot::Sender>), +} + +impl ChainSelectionMessage { + /// If the current variant contains the relay parent hash, return it. + pub fn relay_parent(&self) -> Option { + // None of the messages, even the ones containing specific + // block hashes, can be considered to have those blocks as + // a relay parent. + match *self { + ChainSelectionMessage::Approved(_) => None, + ChainSelectionMessage::Leaves(_) => None, + ChainSelectionMessage::BestLeafContaining(..) => None, + } + } +} + /// A sender for the result of a runtime API request. pub type RuntimeApiSender = oneshot::Sender>; @@ -816,6 +842,9 @@ pub enum AllMessages { /// Message for the dispute participation subsystem. #[skip] DisputeParticipation(DisputeParticipationMessage), + /// Message for the chain selection subsystem. 
+ #[skip] + ChainSelection(ChainSelectionMessage), } impl From> for AvailabilityDistributionMessage { From 5a474eb0e84ffb044504cfef4f59f25951819321 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 20:19:47 +0100 Subject: [PATCH 03/79] add error type --- Cargo.lock | 2 ++ node/core/chain-selection/Cargo.toml | 2 ++ node/core/chain-selection/src/lib.rs | 49 +++++++++++++++++++++++----- 3 files changed, 45 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0cf6d5e1bfb..2096ed608afa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6100,11 +6100,13 @@ dependencies = [ "futures 0.3.15", "kvdb", "kvdb-memorydb", + "parity-scale-codec", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-core", + "thiserror", "tracing", ] diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index afe711413dee..50e1799f9f2e 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -12,6 +12,8 @@ polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.9.0" +thiserror = "1.0.23" +parity-scale-codec = "2" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index e10a130ff4cc..35b40abec437 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -19,8 +19,15 @@ use polkadot_primitives::v1::Hash; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, + messages::{ChainSelectionMessage, ChainApiMessage}, + errors::ChainApiError, }; +use parity_scale_codec::Error as CodecError; +use futures::channel::oneshot; + +const LOG_TARGET: &str = "parachain::chain-selection"; + type Weight = u64; type Timestamp = u64; @@ -71,6 +78,36 @@ struct BlockEntry { weight: Weight, } +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + ChainApi(#[from] ChainApiError), + + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Oneshot(#[from] oneshot::Canceled), + + #[error(transparent)] + Subsystem(#[from] SubsystemError), + + #[error(transparent)] + Codec(#[from] CodecError), +} + +impl Error { + fn trace(&self) { + match self { + // don't spam the log with spurious errors + Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self), + // it's worth reporting otherwise + _ => tracing::warn!(target: LOG_TARGET, err = ?self), + } + } +} + enum BackendWriteOp { WriteBlockEntry(Hash, BlockEntry), DeleteBlockEntry(Hash), @@ -79,17 +116,13 @@ enum BackendWriteOp { // An abstraction over backend for the logic of this subsystem. trait Backend { - /// The error type of this backend, which is assumed to indicate a - /// fatal database error. - type Error: Into; - /// Load a block entry from the DB. - fn load_block_entry(&self) -> Result; + fn load_block_entry(&self) -> Result; /// Load the active-leaves set. - fn load_leaves(&self) -> Result, Self::Error>; + fn load_leaves(&self) -> Result, Error>; /// Load all stagnant lists up to and including the given unix timestamp. 
- fn load_stagnant_up_to(&self, up_to: Timestamp) -> Result)>, Self::Error>; + fn load_stagnant_up_to(&self, up_to: Timestamp) -> Result)>, Error>; /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&self, ops: Vec) -> Result<(), Self::Error>; + fn write(&self, ops: Vec) -> Result<(), Error>; } From 9c2340b68054957793db424131d29c82b1b271d4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 20:25:09 +0100 Subject: [PATCH 04/79] run loop --- node/core/chain-selection/src/lib.rs | 59 +++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 35b40abec437..adb5563c6f02 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -19,6 +19,7 @@ use polkadot_primitives::v1::Hash; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, + OverseerSignal, FromOverseer, messages::{ChainSelectionMessage, ChainApiMessage}, errors::ChainApiError, }; @@ -124,5 +125,61 @@ trait Backend { fn load_stagnant_up_to(&self, up_to: Timestamp) -> Result)>, Error>; /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&self, ops: Vec) -> Result<(), Error>; + fn write(&mut self, ops: Vec) -> Result<(), Error>; +} + +async fn run(mut ctx: Context, mut backend: B) + where + Context: SubsystemContext, + B: Backend, +{ + loop { + let res = run_iteration(&mut ctx, &mut backend).await; + match res { + Err(e) => { + e.trace(); + + if let Error::Subsystem(SubsystemError::Context(_)) = e { + break; + } + } + Ok(()) => { + tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + break; + } + } + } +} + +// Run the subsystem until an error is encountered or a `conclude` signal is received. +// Most errors are non-fatal and should lead to another call to this function. +// +// A return value of `Ok` indicates that an exit should be made, while non-fatal errors +// lead to another call to this function. +async fn run_iteration(ctx: &mut Context, backend: &mut B) + -> Result<(), Error> + where + Context: SubsystemContext, + B: Backend, +{ + loop { + let ops: Vec = match ctx.recv().await? 
{ + FromOverseer::Signal(OverseerSignal::Conclude) => { + return Ok(()) + } + FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { + unimplemented!() + } + FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => { + unimplemented!() + } + FromOverseer::Communication { msg } => { + unimplemented!() + } + }; + + if !ops.is_empty() { + backend.write(ops)?; + } + } } From 3c49a32a322a7580e343134bb0fd0b0254db98ae Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 22:11:57 +0100 Subject: [PATCH 05/79] fix overseer --- node/overseer/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 2ce51d2d7849..5f2422fc73df 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -2070,7 +2070,7 @@ where }, AllMessages::DisputeCoordinator(_) => {} AllMessages::DisputeParticipation(_) => {} - AllMessages::ChainSelectionMessage(_) => {} + AllMessages::ChainSelection(_) => {} } Ok(()) From 06e92a603aedefeda1e2a4e1aabf4ac017d1747e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 22:12:43 +0100 Subject: [PATCH 06/79] simplify determine_new_blocks API --- node/subsystem-util/src/determine_new_blocks.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/node/subsystem-util/src/determine_new_blocks.rs b/node/subsystem-util/src/determine_new_blocks.rs index 205ca1d8f1dc..f77b598f50d8 100644 --- a/node/subsystem-util/src/determine_new_blocks.rs +++ b/node/subsystem-util/src/determine_new_blocks.rs @@ -17,8 +17,7 @@ //! A utility for fetching all unknown blocks based on a new chain-head hash. use polkadot_node_subsystem::{ - messages::ChainApiMessage, - SubsystemSender, SubsystemError, SubsystemResult, + messages::ChainApiMessage, SubsystemSender, }; use polkadot_primitives::v1::{Hash, Header, BlockNumber}; use futures::prelude::*; @@ -41,9 +40,7 @@ pub async fn determine_new_blocks( head: Hash, header: &Header, lower_bound_number: BlockNumber, -) -> SubsystemResult> - where SubsystemError: From -{ +) -> Result, E> { const ANCESTRY_STEP: usize = 4; // Early exit if the block is in the DB or too early. @@ -151,7 +148,7 @@ mod tests { self.blocks.insert(hash); } - fn is_known(&self, hash: &Hash) -> Result { + fn is_known(&self, hash: &Hash) -> Result { Ok(self.blocks.contains(hash)) } } From a2b02500c504f93c4cc267128095c91bbb50e379 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 23:23:21 +0100 Subject: [PATCH 07/79] write an overlay struct and fetch new blocks --- node/core/chain-selection/src/lib.rs | 137 +++++++++++++++++++++++++-- 1 file changed, 130 insertions(+), 7 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index adb5563c6f02..ab7fda8fb680 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -16,7 +16,7 @@ //! Implements the Chain Selection Subsystem. 
-use polkadot_primitives::v1::Hash; +use polkadot_primitives::v1::{BlockNumber, Hash, Header}; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, @@ -27,11 +27,14 @@ use polkadot_subsystem::{ use parity_scale_codec::Error as CodecError; use futures::channel::oneshot; +use std::collections::HashMap; + const LOG_TARGET: &str = "parachain::chain-selection"; type Weight = u64; type Timestamp = u64; +#[derive(Debug, Clone)] enum Approval { // Approved Approved, @@ -50,6 +53,7 @@ impl Approval { } } +#[derive(Debug, Clone)] struct ViabilityCriteria { // Whether this block has been explicitly reverted by one of its descendants. explicitly_reverted: bool, @@ -66,11 +70,13 @@ impl ViabilityCriteria { } } +#[derive(Debug, Clone)] struct LeafEntry { weight: Weight, block_hash: Hash, } +#[derive(Debug, Clone)] struct BlockEntry { block_hash: Hash, parent_hash: Hash, @@ -111,23 +117,91 @@ impl Error { enum BackendWriteOp { WriteBlockEntry(Hash, BlockEntry), - DeleteBlockEntry(Hash), + WriteBlocksByNumber(BlockNumber, Vec), WriteActiveLeaves(Vec), + WriteStagnantAt(Timestamp, Vec), + DeleteBlocksByNumber(BlockNumber), + DeleteBlockEntry(Hash), } // An abstraction over backend for the logic of this subsystem. trait Backend { /// Load a block entry from the DB. - fn load_block_entry(&self) -> Result; + fn load_block_entry(&self, hash: &Hash) -> Result, Error>; /// Load the active-leaves set. fn load_leaves(&self) -> Result, Error>; + /// Load the stagnant list at the given timestamp. + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; /// Load all stagnant lists up to and including the given unix timestamp. - fn load_stagnant_up_to(&self, up_to: Timestamp) -> Result)>, Error>; + fn load_stagnant_at_up_to(&self, up_to: Timestamp) + -> Result)>, Error>; + /// Load the earliest kept block number. + fn load_first_block_number(&self) -> Result, Error>; + /// Load blocks by number. + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error>; /// Atomically write the list of operations, with later operations taking precedence over prior. fn write(&mut self, ops: Vec) -> Result<(), Error>; } +// An in-memory overlay over the backend. +struct OverlayedBackend<'a, B: 'a> { + inner: &'a B, + + // `None` means 'deleted', missing means query inner. + block_entries: HashMap>, + // `None` means 'deleted', missing means query inner. 
+ blocks_by_number: HashMap>>, +} + +impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { + fn load_block_entry(&self, hash: &Hash) -> Result, Error> { + if let Some(val) = self.block_entries.get(&hash) { + return Ok(val.clone()) + } + + self.inner.load_block_entry(hash) + } + + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { + if let Some(val) = self.blocks_by_number.get(&number) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_blocks_by_number(number) + } + + fn write_block_entry(&mut self, hash: Hash, entry: BlockEntry) { + self.block_entries.insert(hash, Some(entry)); + } + + fn delete_block_entry(&mut self, hash: &Hash) { + self.block_entries.remove(hash); + } + + fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { + self.blocks_by_number.insert(number, Some(blocks)); + } + + fn delete_blocks_by_number(&mut self, number: BlockNumber) { + self.blocks_by_number.insert(number, None); + } + + fn into_write_ops(self) -> impl Iterator { + let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { + Some(v) => BackendWriteOp::WriteBlockEntry(h, v), + None => BackendWriteOp::DeleteBlockEntry(h), + }); + + let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v), + None => BackendWriteOp::DeleteBlocksByNumber(n), + }); + + block_entry_ops.chain(blocks_by_number_ops) + } +} + async fn run(mut ctx: Context, mut backend: B) where Context: SubsystemContext, @@ -163,7 +237,7 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) B: Backend, { loop { - let ops: Vec = match ctx.recv().await? { + match ctx.recv().await? { FromOverseer::Signal(OverseerSignal::Conclude) => { return Ok(()) } @@ -177,9 +251,58 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) unimplemented!() } }; + } +} + +async fn fetch_finalized_number( + ctx: &mut impl SubsystemContext, +) -> Result { + unimplemented!() +} - if !ops.is_empty() { - backend.write(ops)?; +async fn fetch_header( + ctx: &mut impl SubsystemContext, + hash: Hash, +) -> Result, Error> { + let (h_tx, h_rx) = oneshot::channel(); + ctx.send_message(ChainApiMessage::BlockHeader(hash, h_tx).into()).await; + + match h_rx.await?? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?hash, + "Missing header for new head", + ); + Ok(None) } + Some(h) => Ok(Some(h)), } } + +// Handle a new active leaf. +async fn handle_active_leaf( + ctx: &mut impl SubsystemContext, + backend: &impl Backend, + hash: Hash, +) -> Result, Error> { + let lower_bound = match backend.load_first_block_number()? { + Some(l) => l, + None => fetch_finalized_number(ctx).await?, + }; + + let header = match fetch_header(ctx, hash).await? 
{ + None => return Ok(Vec::new()), + Some(h) => h, + }; + + let new_blocks = polkadot_node_subsystem_util::determine_new_blocks( + ctx.sender(), + |h| backend.load_block_entry(h).map(|b| b.is_some()), + hash, + &header, + lower_bound, + ).await?; + + unimplemented!() +} From 8790e524d1a0d6018bcee3dbcd1424e607a841e8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 23:31:11 +0100 Subject: [PATCH 08/79] add new function to overlay --- node/core/chain-selection/src/lib.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index ab7fda8fb680..c68498e8a4f9 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -155,6 +155,14 @@ struct OverlayedBackend<'a, B: 'a> { } impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { + fn new(backend: &'a B) -> Self { + OverlayedBackend { + inner: backend, + block_entries: HashMap::new(), + blocks_by_number: HashMap::new(), + } + } + fn load_block_entry(&self, hash: &Hash) -> Result, Error> { if let Some(val) = self.block_entries.get(&hash) { return Ok(val.clone()) From d0f0f4565c098ebacb10e69900ace0c90f1ca961 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 23:34:13 +0100 Subject: [PATCH 09/79] more flow --- node/core/chain-selection/src/lib.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index c68498e8a4f9..b2b7bc182771 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -312,5 +312,21 @@ async fn handle_active_leaf( lower_bound, ).await?; + let mut overlay = OverlayedBackend::new(backend); + + // determine_new_blocks gives blocks in descending order. + // for this, we want ascending order. 
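	// For example, if the unknown segment is B1 <- B2 <- B3 (hypothetical hashes, B3 being
	// the new head), `determine_new_blocks` yields [B3, B2, B1]; reversing the iterator
	// imports B1 first, so each block's parent entry is already written by the time its
	// children are imported.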
+ for (hash, header) in new_blocks.into_iter().rev() { + import_block(&mut overlay, hash, header)?; + } + + Ok(overlay.into_write_ops().collect()) +} + +fn import_block( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_header: Header, +) -> Result<(), Error> { unimplemented!() } From 60196bf76e5f25c69493129dceb01955b926672f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 23:43:44 +0100 Subject: [PATCH 10/79] add leaves to overlay and add a strong type around leaves-set --- node/core/chain-selection/src/lib.rs | 53 ++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index b2b7bc182771..23a10740d72e 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -76,6 +76,36 @@ struct LeafEntry { block_hash: Hash, } +#[derive(Debug, Clone)] +struct LeafEntrySet { + inner: Vec +} + +impl LeafEntrySet { + fn contains(&self, hash: &Hash) -> bool { + self.inner.iter().position(|e| &e.block_hash == hash).is_some() + } + + fn remove(&mut self, hash: &Hash) -> bool { + match self.inner.iter().position(|e| &e.block_hash == hash) { + None => false, + Some(i) => { + self.inner.remove(i); + true + } + } + } + + fn insert(&mut self, new: LeafEntry) { + match self.inner.iter().position(|e| e.weight < new.weight) { + None => self.inner.push(new), + Some(i) => if self.inner[i].block_hash != new.block_hash { + self.inner.insert(i, new); + } + } + } +} + #[derive(Debug, Clone)] struct BlockEntry { block_hash: Hash, @@ -118,7 +148,7 @@ impl Error { enum BackendWriteOp { WriteBlockEntry(Hash, BlockEntry), WriteBlocksByNumber(BlockNumber, Vec), - WriteActiveLeaves(Vec), + WriteViableLeaves(LeafEntrySet), WriteStagnantAt(Timestamp, Vec), DeleteBlocksByNumber(BlockNumber), DeleteBlockEntry(Hash), @@ -129,7 +159,7 @@ trait Backend { /// Load a block entry from the DB. fn load_block_entry(&self, hash: &Hash) -> Result, Error>; /// Load the active-leaves set. - fn load_leaves(&self) -> Result, Error>; + fn load_leaves(&self) -> Result; /// Load the stagnant list at the given timestamp. fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; /// Load all stagnant lists up to and including the given unix timestamp. @@ -152,6 +182,8 @@ struct OverlayedBackend<'a, B: 'a> { block_entries: HashMap>, // `None` means 'deleted', missing means query inner. blocks_by_number: HashMap>>, + // 'None' means query inner. 
+ leaves: Option, } impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { @@ -160,6 +192,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { inner: backend, block_entries: HashMap::new(), blocks_by_number: HashMap::new(), + leaves: None, } } @@ -179,6 +212,14 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.inner.load_blocks_by_number(number) } + fn load_leaves(&self) -> Result { + if let Some(ref set) = self.leaves { + return Ok(set.clone()) + } + + self.inner.load_leaves() + } + fn write_block_entry(&mut self, hash: Hash, entry: BlockEntry) { self.block_entries.insert(hash, Some(entry)); } @@ -195,6 +236,10 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.blocks_by_number.insert(number, None); } + fn write_leaves(&mut self, leaves: LeafEntrySet) { + self.leaves = Some(leaves); + } + fn into_write_ops(self) -> impl Iterator { let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { Some(v) => BackendWriteOp::WriteBlockEntry(h, v), @@ -206,7 +251,9 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { None => BackendWriteOp::DeleteBlocksByNumber(n), }); - block_entry_ops.chain(blocks_by_number_ops) + let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); + + block_entry_ops.chain(blocks_by_number_ops).chain(leaf_ops) } } From a023348db89223612ff7eb3da44bd1271d77ccb4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Jun 2021 23:44:52 +0100 Subject: [PATCH 11/79] add is_parent_viable --- node/core/chain-selection/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 23a10740d72e..6c577869cdb3 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -64,10 +64,14 @@ struct ViabilityCriteria { impl ViabilityCriteria { fn is_viable(&self) -> bool { - self.earliest_non_viable_ancestor.is_none() + self.is_parent_viable() && !self.explicitly_reverted && !self.approval.is_stagnant() } + + fn is_parent_viable(&self) -> bool { + self.earliest_non_viable_ancestor.is_none() + } } #[derive(Debug, Clone)] From 537e59bb0b4fd880040dc7eeabd394a4c9e118cb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 00:02:28 +0100 Subject: [PATCH 12/79] implement block import, ignoring reversions --- node/core/chain-selection/src/lib.rs | 72 +++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 6c577869cdb3..9283beb899b1 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -119,6 +119,16 @@ struct BlockEntry { weight: Weight, } +impl BlockEntry { + fn non_viable_ancestor_for_child(&self) -> Option { + if self.viability.is_viable() { + None + } else { + self.viability.earliest_non_viable_ancestor.or(Some(self.block_hash)) + } + } +} + #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { @@ -368,7 +378,8 @@ async fn handle_active_leaf( // determine_new_blocks gives blocks in descending order. // for this, we want ascending order. 
for (hash, header) in new_blocks.into_iter().rev() { - import_block(&mut overlay, hash, header)?; + let weight = unimplemented!(); + import_block(&mut overlay, hash, header, weight)?; } Ok(overlay.into_write_ops().collect()) @@ -378,6 +389,63 @@ fn import_block( backend: &mut OverlayedBackend, block_hash: Hash, block_header: Header, + weight: Weight, ) -> Result<(), Error> { - unimplemented!() + import_block_ignoring_reversions(backend, block_hash, block_header, weight)?; + + // TODO [now]: apply reversions. + + Ok(()) +} + +fn import_block_ignoring_reversions( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_header: Header, + weight: Weight, +) -> Result<(), Error> { + let parent_hash = block_header.parent_hash; + + let mut leaves = backend.load_leaves()?; + let parent_entry = backend.load_block_entry(&parent_hash)?; + + let inherited_viability = parent_entry.as_ref() + .and_then(|parent| parent.non_viable_ancestor_for_child()); + + // 1. Add the block to the DB assuming it's not reverted. + backend.write_block_entry( + block_hash, + BlockEntry { + block_hash, + parent_hash: parent_hash, + children: Vec::new(), + viability: ViabilityCriteria { + earliest_non_viable_ancestor: inherited_viability, + explicitly_reverted: false, + approval: Approval::Unapproved, + }, + weight, + } + ); + + // 2. Update leaves if parent was a viable leaf or the parent is unknown. + if leaves.remove(&parent_hash) || parent_entry.is_none() { + leaves.insert(LeafEntry { block_hash, weight }); + backend.write_leaves(leaves); + } + + // 3. Update and write the parent + if let Some(mut parent_entry) = parent_entry { + parent_entry.children.push(block_hash); + backend.write_block_entry(parent_hash, parent_entry); + } + + // 4. Add to blocks-by-number. + let mut blocks_by_number = backend.load_blocks_by_number(block_header.number)?; + blocks_by_number.push(block_hash); + backend.write_blocks_by_number(block_header.number, blocks_by_number); + + // 5. TODO [now]: Add stagnation timeout. + + Ok(()) } From 22705aa2fb9c5c7ec1cdd00d1dc0d45ff2984f3c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 00:08:33 +0100 Subject: [PATCH 13/79] add stagnant-at to overlay --- node/core/chain-selection/src/lib.rs | 30 +++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 9283beb899b1..60017b134ce8 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -166,6 +166,7 @@ enum BackendWriteOp { WriteStagnantAt(Timestamp, Vec), DeleteBlocksByNumber(BlockNumber), DeleteBlockEntry(Hash), + DeleteStagnantAt(Timestamp), } // An abstraction over backend for the logic of this subsystem. @@ -196,6 +197,8 @@ struct OverlayedBackend<'a, B: 'a> { block_entries: HashMap>, // `None` means 'deleted', missing means query inner. blocks_by_number: HashMap>>, + // 'None' means 'deleted', missing means query inner. + stagnant_at: HashMap>>, // 'None' means query inner. 
leaves: Option, } @@ -206,6 +209,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { inner: backend, block_entries: HashMap::new(), blocks_by_number: HashMap::new(), + stagnant_at: HashMap::new(), leaves: None, } } @@ -234,6 +238,14 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.inner.load_leaves() } + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { + if let Some(val) = self.stagnant_at.get(×tamp) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_stagnant_at(timestamp) + } + fn write_block_entry(&mut self, hash: Hash, entry: BlockEntry) { self.block_entries.insert(hash, Some(entry)); } @@ -254,6 +266,14 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.leaves = Some(leaves); } + fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec) { + self.stagnant_at.insert(timestamp, Some(hashes)); + } + + fn delete_stagnant_at(&mut self, timestamp: Timestamp) { + self.stagnant_at.insert(timestamp, None); + } + fn into_write_ops(self) -> impl Iterator { let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { Some(v) => BackendWriteOp::WriteBlockEntry(h, v), @@ -267,7 +287,15 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); - block_entry_ops.chain(blocks_by_number_ops).chain(leaf_ops) + let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteStagnantAt(n, v), + None => BackendWriteOp::DeleteStagnantAt(n), + }); + + block_entry_ops + .chain(blocks_by_number_ops) + .chain(leaf_ops) + .chain(stagnant_at_ops) } } From c71d5b2179283860ff77d2078b6202df6b88fac3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 01:06:33 +0100 Subject: [PATCH 14/79] add stagnant --- node/core/chain-selection/src/lib.rs | 30 +++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 60017b134ce8..044680a0f455 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -28,6 +28,7 @@ use parity_scale_codec::Error as CodecError; use futures::channel::oneshot; use std::collections::HashMap; +use std::time::{UNIX_EPOCH, SystemTime}; const LOG_TARGET: &str = "parachain::chain-selection"; @@ -159,6 +160,29 @@ impl Error { } } +fn timestamp_now() -> Timestamp { + match SystemTime::now().duration_since(UNIX_EPOCH) { + Ok(d) => d.as_secs(), + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Current time is before unix epoch. Validation will not work correctly." + ); + + 0 + } + } +} + +fn stagnant_timeout_from_now() -> Timestamp { + // If a block isn't approved in 120 seconds, nodes will abandon it + // and begin building on another chain. + const STAGNANT_TIMEOUT: Timestamp = 120; + + timestamp_now() + STAGNANT_TIMEOUT +} + enum BackendWriteOp { WriteBlockEntry(Hash, BlockEntry), WriteBlocksByNumber(BlockNumber, Vec), @@ -473,7 +497,11 @@ fn import_block_ignoring_reversions( blocks_by_number.push(block_hash); backend.write_blocks_by_number(block_header.number, blocks_by_number); - // 5. TODO [now]: Add stagnation timeout. + // 5. Add stagnation timeout. 
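	// For example (using the constants above): a block imported at unix time 1_000_000 is
	// queued under the stagnant-at key 1_000_120. The intent, presumably, is that if the
	// block is still `Unapproved` when `load_stagnant_at_up_to` later drains that key, it
	// is marked `Stagnant` and thereby becomes unviable.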
+ let stagnant_at = stagnant_timeout_from_now(); + let mut stagnant_at_list = backend.load_stagnant_at(stagnant_at)?; + stagnant_at_list.push(block_hash); + backend.write_stagnant_at(stagnant_at, stagnant_at_list); Ok(()) } From 422afc4ca19c9c06a43016612c592942c920f000 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 02:39:40 +0100 Subject: [PATCH 15/79] add revert consensus log --- primitives/src/v1/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/primitives/src/v1/mod.rs b/primitives/src/v1/mod.rs index dbc4a7ceabc0..0fe6ed84702f 100644 --- a/primitives/src/v1/mod.rs +++ b/primitives/src/v1/mod.rs @@ -1046,6 +1046,16 @@ pub enum ConsensusLog { /// number in the current chain, inclusive. #[codec(index = 3)] ForceApprove(BlockNumber), + /// A signal to revert the block number in the same chain as the + /// header this digest is part of and all of its descendents. + /// + /// It is a no-op for a block to contain a revert digest targeting + /// its own number or a higher number. + /// + /// In practice, these are issued when on-chain logic has detected an + /// invalid parachain block within its own chain, due to a dispute. + #[codec(index = 4)] + Revert(BlockNumber) } impl ConsensusLog { From 70e5d8a660a7fcfe28761956445cc3a13d35cfe6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 02:40:10 +0100 Subject: [PATCH 16/79] flow for reversions --- node/core/chain-selection/src/lib.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 044680a0f455..b142926f832a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -444,8 +444,7 @@ fn import_block( weight: Weight, ) -> Result<(), Error> { import_block_ignoring_reversions(backend, block_hash, block_header, weight)?; - - // TODO [now]: apply reversions. + apply_imported_block_reversions(backend, block_hash, block_header)?; Ok(()) } @@ -505,3 +504,15 @@ fn import_block_ignoring_reversions( Ok(()) } + +// Assuming that a block is already imported, scans the header of the block +// for revert signals and applies those to relevant ancestors, and recursively +// updates the viability of those ancestors' descendants. +fn apply_imported_block_reversions( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_header: Header, +) -> Result<(), Error> { + // Scan for reversion digests. + unimplemented!() +} From a792b1c2b0a87cced212631999390d174a128a82 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 04:13:49 +0100 Subject: [PATCH 17/79] extract and import block reversions --- node/core/chain-selection/src/lib.rs | 159 +++++++++++++++++++++++++-- 1 file changed, 149 insertions(+), 10 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index b142926f832a..d80d72f3317d 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -16,7 +16,7 @@ //! Implements the Chain Selection Subsystem. 
-use polkadot_primitives::v1::{BlockNumber, Hash, Header}; +use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, @@ -65,11 +65,16 @@ struct ViabilityCriteria { impl ViabilityCriteria { fn is_viable(&self) -> bool { - self.is_parent_viable() - && !self.explicitly_reverted - && !self.approval.is_stagnant() + self.is_parent_viable() && !self.is_unviable_source() } + // Whether the current block is a source of unviability itself. + // That is, whether the current block is reverted or stagnant. + fn is_unviable_source(&self) -> bool { + self.explicitly_reverted || self.approval.is_stagnant() + } + + // Whether the parent is viable. fn is_parent_viable(&self) -> bool { self.earliest_non_viable_ancestor.is_none() } @@ -194,6 +199,7 @@ enum BackendWriteOp { } // An abstraction over backend for the logic of this subsystem. +// TODO [now]: extract to submodule trait Backend { /// Load a block entry from the DB. fn load_block_entry(&self, hash: &Hash) -> Result, Error>; @@ -443,8 +449,8 @@ fn import_block( block_header: Header, weight: Weight, ) -> Result<(), Error> { - import_block_ignoring_reversions(backend, block_hash, block_header, weight)?; - apply_imported_block_reversions(backend, block_hash, block_header)?; + import_block_ignoring_reversions(backend, block_hash, &block_header, weight)?; + apply_imported_block_reversions(backend, block_hash, &block_header)?; Ok(()) } @@ -452,7 +458,7 @@ fn import_block( fn import_block_ignoring_reversions( backend: &mut OverlayedBackend, block_hash: Hash, - block_header: Header, + block_header: &Header, weight: Weight, ) -> Result<(), Error> { let parent_hash = block_header.parent_hash; @@ -505,14 +511,147 @@ fn import_block_ignoring_reversions( Ok(()) } +// Extract all reversion logs from a header in ascending order. +// +// Ignores logs with number >= the block header number. +fn extract_reversion_logs(header: &Header) -> Vec { + let number = header.number; + let mut logs = header.digest.logs() + .iter() + .enumerate() + .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + index = i, + block_hash = ?header.hash(), + "Digest item failed to encode" + ); + + None + } + Ok(Some(ConsensusLog::Revert(b))) => if b < number { + Some(b) + } else { + tracing::warn!( + target: LOG_TARGET, + revert_target = b, + block_number = number, + block_hash = ?header.hash(), + "Block issued invalid revert digest targeting itself or future" + ); + + None + } + Ok(_) => None, + }) + .collect::>(); + + logs.sort(); + + logs +} + +// Load the given ancestor's block entry, in descending order from the `block_hash`. +// The ancestor_number must be at least one block less than the `block_number`. +// +// The returned entry will be `None` if the range is invalid or any block in the path had +// no entry present. If any block entry was missing, it can safely be assumed to +// be finalized. +fn load_ancestor( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_number: BlockNumber, + ancestor_number: BlockNumber, +) -> Result, Error> { + if block_number <= ancestor_number { return Ok(None) } + + let mut current_hash = block_hash; + let mut current_entry = None; + + let segment_length = (block_number - ancestor_number) + 1; + for _ in std::iter::repeat(()).take(segment_length as usize) { + match backend.load_block_entry(¤t_hash)? 
{ + None => return Ok(None), + Some(entry) => { + let parent_hash = entry.parent_hash; + current_entry = Some(entry); + current_hash = parent_hash; + } + } + } + + // Current entry should always be `Some` here. + Ok(current_entry) +} + +// Propagate viability update to descendants of the given block. +// +// If the block entry provided is self-unviable, then it's assumed that an +// unviability update needs to be propagated to descendants. +// +// If the block entry provided is self-viable, then it's assumed that a +// viability update needs to be propagated to descendants. +fn propagate_viability_change( + backend: &mut OverlayedBackend, + entry: &BlockEntry, +) -> Result<(), Error> { + unimplemented!(); +} + // Assuming that a block is already imported, scans the header of the block // for revert signals and applies those to relevant ancestors, and recursively // updates the viability of those ancestors' descendants. fn apply_imported_block_reversions( backend: &mut OverlayedBackend, block_hash: Hash, - block_header: Header, + block_header: &Header, ) -> Result<(), Error> { - // Scan for reversion digests. - unimplemented!() + let logs = extract_reversion_logs(&block_header); + + // Note: since revert numbers are returned from `extract_reversion_logs` + // in ascending order, the expensive propagation of unviability is + // only heavy on the first log. + for revert_number in logs { + let mut ancestor_entry = match load_ancestor( + backend, + block_hash, + block_header.number, + revert_number, + )? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?block_hash, + block_number = block_header.number, + revert_target = revert_number, + "The hammer has dropped. \ + A block has indicated that its finalized ancestor be reverted. \ + Please inform an adult.", + ); + + continue + } + Some(ancestor_entry) => { + tracing::info!( + target: LOG_TARGET, + ?block_hash, + block_number = block_header.number, + revert_target = revert_number, + revert_hash = ?ancestor_entry.block_hash, + "A block has signaled that its ancestor be reverted due to a bad parachain block.", + ); + + ancestor_entry + } + }; + + ancestor_entry.viability.explicitly_reverted = true; + backend.write_block_entry(ancestor_entry.block_hash, ancestor_entry.clone()); + + propagate_viability_change(backend, &ancestor_entry)?; + } + + Ok(()) } From 3487cf3001fce6c396ea0625f7421095f1016b3c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 05:06:51 +0100 Subject: [PATCH 18/79] recursively update viability --- node/core/chain-selection/src/lib.rs | 127 ++++++++++++++++++++++++--- 1 file changed, 114 insertions(+), 13 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index d80d72f3317d..d1d0cb735144 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -60,23 +60,23 @@ struct ViabilityCriteria { explicitly_reverted: bool, // `None` means approved. `Some` means unapproved. approval: Approval, - earliest_non_viable_ancestor: Option, + earliest_unviable_ancestor: Option, } impl ViabilityCriteria { fn is_viable(&self) -> bool { - self.is_parent_viable() && !self.is_unviable_source() + self.is_parent_viable() && self.is_partially_viable() } - // Whether the current block is a source of unviability itself. - // That is, whether the current block is reverted or stagnant. - fn is_unviable_source(&self) -> bool { - self.explicitly_reverted || self.approval.is_stagnant() + // Whether the current block is partially viable. 
+ // That is, whether the current block is neither reverted nor stagnant. + fn is_partially_viable(&self) -> bool { + !self.explicitly_reverted && !self.approval.is_stagnant() } // Whether the parent is viable. fn is_parent_viable(&self) -> bool { - self.earliest_non_viable_ancestor.is_none() + self.earliest_unviable_ancestor.is_none() } } @@ -130,7 +130,7 @@ impl BlockEntry { if self.viability.is_viable() { None } else { - self.viability.earliest_non_viable_ancestor.or(Some(self.block_hash)) + self.viability.earliest_unviable_ancestor.or(Some(self.block_hash)) } } } @@ -477,7 +477,7 @@ fn import_block_ignoring_reversions( parent_hash: parent_hash, children: Vec::new(), viability: ViabilityCriteria { - earliest_non_viable_ancestor: inherited_viability, + earliest_unviable_ancestor: inherited_viability, explicitly_reverted: false, approval: Approval::Unapproved, }, @@ -586,6 +586,55 @@ fn load_ancestor( Ok(current_entry) } +// A viability update to be applied to a block. +struct ViabilityUpdate(Option); + +impl ViabilityUpdate { + // Apply the viability update to a single block, yielding the updated + // block entry along with a vector of children and the updates to apply + // to them. + fn apply(self, mut entry: BlockEntry) -> ( + Option, + Vec<(Hash, ViabilityUpdate)> + ) { + // 1. When an ancestor has changed from unviable to viable, + // we erase the `earliest_unviable_ancestor` of all descendants + // until encountering a partially unviable descendant D. + // + // We then update the `earliest_unviable_ancestor` for all + // descendants of D to be equal to D. + // + // 2. When an ancestor A has changed from viable to unviable, + // we update the `earliest_unviable_ancestor` for all blocks + // to A. + // + // The following algorithm covers both cases. + + let maybe_earliest_unviable = self.0; + let next_earliest_unviable = { + if maybe_earliest_unviable.is_none() && !entry.viability.is_partially_viable() { + Some(entry.block_hash) + } else { + maybe_earliest_unviable + } + }; + + let recurse = entry.children.iter() + .cloned() + .map(move |c| (c, ViabilityUpdate(next_earliest_unviable))) + .collect(); + + let new_entry = if entry.viability.earliest_unviable_ancestor == maybe_earliest_unviable { + None + } else { + entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable; + Some(entry) + }; + + (new_entry, recurse) + } +} + // Propagate viability update to descendants of the given block. // // If the block entry provided is self-unviable, then it's assumed that an @@ -593,11 +642,58 @@ fn load_ancestor( // // If the block entry provided is self-viable, then it's assumed that a // viability update needs to be propagated to descendants. -fn propagate_viability_change( +fn propagate_viability_update( backend: &mut OverlayedBackend, - entry: &BlockEntry, + base: BlockEntry, ) -> Result<(), Error> { - unimplemented!(); + enum BlockEntryRef { + Explicit(BlockEntry), + Hash(Hash), + } + + if !base.viability.is_parent_viable() { + // If the parent of the block is still unviable, + // then the `earliest_viable_ancestor` will not change + // regardless of the change in the block here. + return Ok(()) + } + + // If the base block is itself partially unviable, + // this will change to a `Some(base_hash)` after the first + // invocation. 
+ let viability_update = ViabilityUpdate(None); + + // Recursively apply update to tree + let mut frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; + while let Some((entry_ref, update)) = frontier.pop() { + let entry = match entry_ref { + BlockEntryRef::Explicit(entry) => entry, + BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? { + None => { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Missing expected block entry" + ); + + continue; + } + Some(entry) => entry, + } + }; + + let (new_entry, children) = update.apply(entry); + + if let Some(new_entry) = new_entry { + backend.write_block_entry(new_entry.block_hash, new_entry); + } + + frontier.extend( + children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) + ); + } + + Ok(()) } // Assuming that a block is already imported, scans the header of the block @@ -650,7 +746,12 @@ fn apply_imported_block_reversions( ancestor_entry.viability.explicitly_reverted = true; backend.write_block_entry(ancestor_entry.block_hash, ancestor_entry.clone()); - propagate_viability_change(backend, &ancestor_entry)?; + propagate_viability_update(backend, ancestor_entry)?; + + // TODO [now]: update leaves set. For this we need to know any visited + // blocks which were leaves, so we can remove them. We also add the + // parent of the earliest viable ancestor to the leaves-set, if it's + // present. } Ok(()) From 4a1f610e3fc505dd7b2cb53a280db41f7315098c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 05:25:54 +0100 Subject: [PATCH 19/79] remove redundant parameter from WriteBlockEntry --- node/core/chain-selection/src/lib.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index d1d0cb735144..1486f14cfc87 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -189,7 +189,7 @@ fn stagnant_timeout_from_now() -> Timestamp { } enum BackendWriteOp { - WriteBlockEntry(Hash, BlockEntry), + WriteBlockEntry(BlockEntry), WriteBlocksByNumber(BlockNumber, Vec), WriteViableLeaves(LeafEntrySet), WriteStagnantAt(Timestamp, Vec), @@ -276,8 +276,8 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { self.inner.load_stagnant_at(timestamp) } - fn write_block_entry(&mut self, hash: Hash, entry: BlockEntry) { - self.block_entries.insert(hash, Some(entry)); + fn write_block_entry(&mut self, entry: BlockEntry) { + self.block_entries.insert(entry.block_hash, Some(entry)); } fn delete_block_entry(&mut self, hash: &Hash) { @@ -306,7 +306,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { fn into_write_ops(self) -> impl Iterator { let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { - Some(v) => BackendWriteOp::WriteBlockEntry(h, v), + Some(v) => BackendWriteOp::WriteBlockEntry(v), None => BackendWriteOp::DeleteBlockEntry(h), }); @@ -471,7 +471,6 @@ fn import_block_ignoring_reversions( // 1. Add the block to the DB assuming it's not reverted. backend.write_block_entry( - block_hash, BlockEntry { block_hash, parent_hash: parent_hash, @@ -494,7 +493,7 @@ fn import_block_ignoring_reversions( // 3. Update and write the parent if let Some(mut parent_entry) = parent_entry { parent_entry.children.push(block_hash); - backend.write_block_entry(parent_hash, parent_entry); + backend.write_block_entry(parent_entry); } // 4. Add to blocks-by-number. 
@@ -685,7 +684,7 @@ fn propagate_viability_update( let (new_entry, children) = update.apply(entry); if let Some(new_entry) = new_entry { - backend.write_block_entry(new_entry.block_hash, new_entry); + backend.write_block_entry(new_entry); } frontier.extend( @@ -744,7 +743,7 @@ fn apply_imported_block_reversions( }; ancestor_entry.viability.explicitly_reverted = true; - backend.write_block_entry(ancestor_entry.block_hash, ancestor_entry.clone()); + backend.write_block_entry(ancestor_entry.clone()); propagate_viability_update(backend, ancestor_entry)?; From ae745366d72ec3f1f2ec69972db03133966fe661 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 05:38:51 +0100 Subject: [PATCH 20/79] do some removal of viable leaves --- node/core/chain-selection/src/lib.rs | 32 +++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 1486f14cfc87..60c2cf6e670d 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -654,9 +654,28 @@ fn propagate_viability_update( // If the parent of the block is still unviable, // then the `earliest_viable_ancestor` will not change // regardless of the change in the block here. + // + // Furthermore, in such cases, the set of viable leaves + // does not change at all. return Ok(()) } + let mut viable_leaves = backend.load_leaves(); + + if base.viability.is_partially_viable() { + // At this point, we know that the parent is viable, + // and this block has just become viable. Therefore, + // if the parent was a viable leaf, it is no longer one. + viable_leaves.remove(&base.parent_hash); + } else { + // At this point, we know that the parent is viable, + // and this block has just become unviable. Therefore, + // this block is not a viable leaf and we must search for + // viable leaves starting from the parent. + viable_leaves.remove(&base.block_hash); + // TODO [now]: search for viable leaves from the parent. + } + // If the base block is itself partially unviable, // this will change to a `Some(base_hash)` after the first // invocation. @@ -684,14 +703,22 @@ fn propagate_viability_update( let (new_entry, children) = update.apply(entry); if let Some(new_entry) = new_entry { + if !new_entry.viability.is_viable() { + viable_leaves.remove(&new_entry.block_hash); + } + backend.write_block_entry(new_entry); } + // TODO [now]: figure out how to find new viable leaves. + frontier.extend( children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) ); } + backend.write_leaves(viable_leaves); + Ok(()) } @@ -746,11 +773,6 @@ fn apply_imported_block_reversions( backend.write_block_entry(ancestor_entry.clone()); propagate_viability_update(backend, ancestor_entry)?; - - // TODO [now]: update leaves set. For this we need to know any visited - // blocks which were leaves, so we can remove them. We also add the - // parent of the earliest viable ancestor to the leaves-set, if it's - // present. 
} Ok(()) From b6788280cecca04a05abc4570f7d10324a07a540 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 16:06:37 +0100 Subject: [PATCH 21/79] address grumbles --- node/core/chain-selection/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 60c2cf6e670d..1947a354635a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -47,10 +47,7 @@ enum Approval { impl Approval { fn is_stagnant(&self) -> bool { - match *self { - Approval::Stagnant => true, - _ => false, - } + matches!(Approval::Stagnant) } } @@ -58,8 +55,10 @@ impl Approval { struct ViabilityCriteria { // Whether this block has been explicitly reverted by one of its descendants. explicitly_reverted: bool, - // `None` means approved. `Some` means unapproved. + // The approval state of this block specifically. approval: Approval, + // The earliest unviable ancestor - the hash of the earliest unfinalized + // block in the ancestry which is explicitly reverted or stagnant. earliest_unviable_ancestor: Option, } @@ -74,7 +73,8 @@ impl ViabilityCriteria { !self.explicitly_reverted && !self.approval.is_stagnant() } - // Whether the parent is viable. + // Whether the parent is viable. This assumes that the parent + // descends from the finalized chain. fn is_parent_viable(&self) -> bool { self.earliest_unviable_ancestor.is_none() } From 645327a3ea20e380fb40d3f2a5a08b6dc88cdb0f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 16:35:50 +0100 Subject: [PATCH 22/79] refactor --- node/core/chain-selection/src/lib.rs | 56 ++++++++++++---------------- 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 1947a354635a..0b7d497843f3 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -47,7 +47,7 @@ enum Approval { impl Approval { fn is_stagnant(&self) -> bool { - matches!(Approval::Stagnant) + matches!(*self, Approval::Stagnant) } } @@ -126,6 +126,13 @@ struct BlockEntry { } impl BlockEntry { + fn leaf_entry(&self) -> LeafEntry { + LeafEntry { + block_hash: self.block_hash, + weight: self.weight, + } + } + fn non_viable_ancestor_for_child(&self) -> Option { if self.viability.is_viable() { None @@ -593,7 +600,7 @@ impl ViabilityUpdate { // block entry along with a vector of children and the updates to apply // to them. fn apply(self, mut entry: BlockEntry) -> ( - Option, + BlockEntry, Vec<(Hash, ViabilityUpdate)> ) { // 1. When an ancestor has changed from unviable to viable, @@ -608,6 +615,17 @@ impl ViabilityUpdate { // to A. // // The following algorithm covers both cases. + // + // Furthermore, if there has been any change in viability, + // it is necessary to visit every single descendant of the root + // block. + // + // If a block B was unviable and is now viable, then every descendant + // has an `earliest_unviable_ancestor` which must be updated either + // to nothing or to the new earliest unviable ancestor. + // + // If a block B was viable and is now unviable, then every descendant + // has an `earliest_unviable_ancestor` which needs to be set to B. 
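		// A small illustrative trace (hypothetical blocks A <- B <- C, with A viable): if B
		// has just been marked explicitly reverted, the walk starts at B with
		// `ViabilityUpdate(None)`. B keeps `earliest_unviable_ancestor = None` (its parent A
		// is still viable), but since B is no longer partially viable, C and every further
		// descendant are visited with `ViabilityUpdate(Some(B))` and record B as their
		// earliest unviable ancestor. If B later becomes partially viable again (e.g. it was
		// stagnant and is then approved), the same walk clears the field, and the clearing
		// stops at the first descendant that is itself partially unviable, whose own
		// descendants then point at it instead.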
let maybe_earliest_unviable = self.0; let next_earliest_unviable = { @@ -617,20 +635,14 @@ impl ViabilityUpdate { maybe_earliest_unviable } }; + entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable; let recurse = entry.children.iter() .cloned() .map(move |c| (c, ViabilityUpdate(next_earliest_unviable))) .collect(); - let new_entry = if entry.viability.earliest_unviable_ancestor == maybe_earliest_unviable { - None - } else { - entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable; - Some(entry) - }; - - (new_entry, recurse) + (entry, recurse) } } @@ -660,21 +672,7 @@ fn propagate_viability_update( return Ok(()) } - let mut viable_leaves = backend.load_leaves(); - - if base.viability.is_partially_viable() { - // At this point, we know that the parent is viable, - // and this block has just become viable. Therefore, - // if the parent was a viable leaf, it is no longer one. - viable_leaves.remove(&base.parent_hash); - } else { - // At this point, we know that the parent is viable, - // and this block has just become unviable. Therefore, - // this block is not a viable leaf and we must search for - // viable leaves starting from the parent. - viable_leaves.remove(&base.block_hash); - // TODO [now]: search for viable leaves from the parent. - } + let mut viable_leaves = backend.load_leaves()?; // If the base block is itself partially unviable, // this will change to a `Some(base_hash)` after the first @@ -702,13 +700,7 @@ fn propagate_viability_update( let (new_entry, children) = update.apply(entry); - if let Some(new_entry) = new_entry { - if !new_entry.viability.is_viable() { - viable_leaves.remove(&new_entry.block_hash); - } - - backend.write_block_entry(new_entry); - } + backend.write_block_entry(new_entry); // TODO [now]: figure out how to find new viable leaves. From 2a407211a7ab9986f45c0b0e5eb2056a75e83122 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 16:37:46 +0100 Subject: [PATCH 23/79] address grumbles --- node/core/chain-selection/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 0b7d497843f3..77f115e8d55e 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -480,7 +480,7 @@ fn import_block_ignoring_reversions( backend.write_block_entry( BlockEntry { block_hash, - parent_hash: parent_hash, + parent_hash, children: Vec::new(), viability: ViabilityCriteria { earliest_unviable_ancestor: inherited_viability, @@ -537,9 +537,8 @@ fn extract_reversion_logs(header: &Header) -> Vec { None } - Ok(Some(ConsensusLog::Revert(b))) => if b < number { - Some(b) - } else { + Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) => { tracing::warn!( target: LOG_TARGET, revert_target = b, From f76d410cfb4db4157bd2dfd2cba72a209676f317 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 16:45:55 +0100 Subject: [PATCH 24/79] add comment about non-monotonicity --- node/core/chain-selection/src/lib.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 77f115e8d55e..092a38c209f4 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -173,6 +173,14 @@ impl Error { } fn timestamp_now() -> Timestamp { + // `SystemTime` is notoriously non-monotonic, so our timers might not work + // exactly as expected. 
Regardless, stagnation is detected on the order of minutes, + // and slippage of a few seconds in either direction won't cause any major harm. + // + // The exact time that a block becomes stagnant in the local node is always expected + // to differ from other nodes due to network asynchrony and delays in block propagation. + // Non-monotonicity exarcerbates that somewhat, but not meaningfully. + match SystemTime::now().duration_since(UNIX_EPOCH) { Ok(d) => d.as_secs(), Err(e) => { From 2602e501ebceb87e21d3c3a889fd308e649fbada Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 17:50:10 +0100 Subject: [PATCH 25/79] extract backend to submodule --- node/core/chain-selection/src/backend.rs | 174 +++++++++++++++++++++++ node/core/chain-selection/src/lib.rs | 146 +------------------ 2 files changed, 178 insertions(+), 142 deletions(-) create mode 100644 node/core/chain-selection/src/backend.rs diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs new file mode 100644 index 000000000000..e8531df962a9 --- /dev/null +++ b/node/core/chain-selection/src/backend.rs @@ -0,0 +1,174 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! An abstraction over storage used by the chain selection subsystem. +//! +//! This provides both a [`Backend`] trait and an [`OverlayedBackend`] +//! struct which allows in-memory changes to be applied on top of a +//! [`Backend`], maintaining consistency between queries and temporary writes, +//! before any commit to the underlying storage is made. + +use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; + +use std::collections::HashMap; + +use crate::{Error, LeafEntrySet, BlockEntry, Timestamp}; + +pub(super) enum BackendWriteOp { + WriteBlockEntry(BlockEntry), + WriteBlocksByNumber(BlockNumber, Vec), + WriteViableLeaves(LeafEntrySet), + WriteStagnantAt(Timestamp, Vec), + DeleteBlocksByNumber(BlockNumber), + DeleteBlockEntry(Hash), + DeleteStagnantAt(Timestamp), +} + +/// An abstraction over backend storage for the logic of this subsystem. +pub(super) trait Backend { + /// Load a block entry from the DB. + fn load_block_entry(&self, hash: &Hash) -> Result, Error>; + /// Load the active-leaves set. + fn load_leaves(&self) -> Result; + /// Load the stagnant list at the given timestamp. + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; + /// Load all stagnant lists up to and including the given unix timestamp. + fn load_stagnant_at_up_to(&self, up_to: Timestamp) + -> Result)>, Error>; + /// Load the earliest kept block number. + fn load_first_block_number(&self) -> Result, Error>; + /// Load blocks by number. + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error>; + + /// Atomically write the list of operations, with later operations taking precedence over prior. 
+ fn write(&mut self, ops: Vec) -> Result<(), Error>; +} + +/// An in-memory overlay over the backend. +/// +/// This maintains read-only access to the underlying backend, but can be +/// converted into a set of write operations which will, when written to +/// the underlying backend, give the same view as the state of the overlay. +pub(super) struct OverlayedBackend<'a, B: 'a> { + inner: &'a B, + + // `None` means 'deleted', missing means query inner. + block_entries: HashMap>, + // `None` means 'deleted', missing means query inner. + blocks_by_number: HashMap>>, + // 'None' means 'deleted', missing means query inner. + stagnant_at: HashMap>>, + // 'None' means query inner. + leaves: Option, +} + +impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { + pub(super) fn new(backend: &'a B) -> Self { + OverlayedBackend { + inner: backend, + block_entries: HashMap::new(), + blocks_by_number: HashMap::new(), + stagnant_at: HashMap::new(), + leaves: None, + } + } + + pub(super) fn load_block_entry(&self, hash: &Hash) -> Result, Error> { + if let Some(val) = self.block_entries.get(&hash) { + return Ok(val.clone()) + } + + self.inner.load_block_entry(hash) + } + + pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { + if let Some(val) = self.blocks_by_number.get(&number) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_blocks_by_number(number) + } + + pub(super) fn load_leaves(&self) -> Result { + if let Some(ref set) = self.leaves { + return Ok(set.clone()) + } + + self.inner.load_leaves() + } + + pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { + if let Some(val) = self.stagnant_at.get(×tamp) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_stagnant_at(timestamp) + } + + pub(super) fn write_block_entry(&mut self, entry: BlockEntry) { + self.block_entries.insert(entry.block_hash, Some(entry)); + } + + pub(super) fn delete_block_entry(&mut self, hash: &Hash) { + self.block_entries.remove(hash); + } + + pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { + self.blocks_by_number.insert(number, Some(blocks)); + } + + pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) { + self.blocks_by_number.insert(number, None); + } + + pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) { + self.leaves = Some(leaves); + } + + pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec) { + self.stagnant_at.insert(timestamp, Some(hashes)); + } + + pub(super) fn delete_stagnant_at(&mut self, timestamp: Timestamp) { + self.stagnant_at.insert(timestamp, None); + } + + /// Transform this backend into a set of write-ops to be written to the + /// inner backend. 
+ pub(super) fn into_write_ops(self) -> impl Iterator { + let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { + Some(v) => BackendWriteOp::WriteBlockEntry(v), + None => BackendWriteOp::DeleteBlockEntry(h), + }); + + let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v), + None => BackendWriteOp::DeleteBlocksByNumber(n), + }); + + let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); + + let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteStagnantAt(n, v), + None => BackendWriteOp::DeleteStagnantAt(n), + }); + + block_entry_ops + .chain(blocks_by_number_ops) + .chain(leaf_ops) + .chain(stagnant_at_ops) + } +} diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 092a38c209f4..b318d5f9d64c 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -27,9 +27,12 @@ use polkadot_subsystem::{ use parity_scale_codec::Error as CodecError; use futures::channel::oneshot; -use std::collections::HashMap; use std::time::{UNIX_EPOCH, SystemTime}; +use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; + +mod backend; + const LOG_TARGET: &str = "parachain::chain-selection"; type Weight = u64; @@ -203,147 +206,6 @@ fn stagnant_timeout_from_now() -> Timestamp { timestamp_now() + STAGNANT_TIMEOUT } -enum BackendWriteOp { - WriteBlockEntry(BlockEntry), - WriteBlocksByNumber(BlockNumber, Vec), - WriteViableLeaves(LeafEntrySet), - WriteStagnantAt(Timestamp, Vec), - DeleteBlocksByNumber(BlockNumber), - DeleteBlockEntry(Hash), - DeleteStagnantAt(Timestamp), -} - -// An abstraction over backend for the logic of this subsystem. -// TODO [now]: extract to submodule -trait Backend { - /// Load a block entry from the DB. - fn load_block_entry(&self, hash: &Hash) -> Result, Error>; - /// Load the active-leaves set. - fn load_leaves(&self) -> Result; - /// Load the stagnant list at the given timestamp. - fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; - /// Load all stagnant lists up to and including the given unix timestamp. - fn load_stagnant_at_up_to(&self, up_to: Timestamp) - -> Result)>, Error>; - /// Load the earliest kept block number. - fn load_first_block_number(&self) -> Result, Error>; - /// Load blocks by number. - fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error>; - - /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&mut self, ops: Vec) -> Result<(), Error>; -} - -// An in-memory overlay over the backend. -struct OverlayedBackend<'a, B: 'a> { - inner: &'a B, - - // `None` means 'deleted', missing means query inner. - block_entries: HashMap>, - // `None` means 'deleted', missing means query inner. - blocks_by_number: HashMap>>, - // 'None' means 'deleted', missing means query inner. - stagnant_at: HashMap>>, - // 'None' means query inner. 
- leaves: Option, -} - -impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { - fn new(backend: &'a B) -> Self { - OverlayedBackend { - inner: backend, - block_entries: HashMap::new(), - blocks_by_number: HashMap::new(), - stagnant_at: HashMap::new(), - leaves: None, - } - } - - fn load_block_entry(&self, hash: &Hash) -> Result, Error> { - if let Some(val) = self.block_entries.get(&hash) { - return Ok(val.clone()) - } - - self.inner.load_block_entry(hash) - } - - fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { - if let Some(val) = self.blocks_by_number.get(&number) { - return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); - } - - self.inner.load_blocks_by_number(number) - } - - fn load_leaves(&self) -> Result { - if let Some(ref set) = self.leaves { - return Ok(set.clone()) - } - - self.inner.load_leaves() - } - - fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { - if let Some(val) = self.stagnant_at.get(×tamp) { - return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); - } - - self.inner.load_stagnant_at(timestamp) - } - - fn write_block_entry(&mut self, entry: BlockEntry) { - self.block_entries.insert(entry.block_hash, Some(entry)); - } - - fn delete_block_entry(&mut self, hash: &Hash) { - self.block_entries.remove(hash); - } - - fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { - self.blocks_by_number.insert(number, Some(blocks)); - } - - fn delete_blocks_by_number(&mut self, number: BlockNumber) { - self.blocks_by_number.insert(number, None); - } - - fn write_leaves(&mut self, leaves: LeafEntrySet) { - self.leaves = Some(leaves); - } - - fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec) { - self.stagnant_at.insert(timestamp, Some(hashes)); - } - - fn delete_stagnant_at(&mut self, timestamp: Timestamp) { - self.stagnant_at.insert(timestamp, None); - } - - fn into_write_ops(self) -> impl Iterator { - let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { - Some(v) => BackendWriteOp::WriteBlockEntry(v), - None => BackendWriteOp::DeleteBlockEntry(h), - }); - - let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v { - Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v), - None => BackendWriteOp::DeleteBlocksByNumber(n), - }); - - let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); - - let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v { - Some(v) => BackendWriteOp::WriteStagnantAt(n, v), - None => BackendWriteOp::DeleteStagnantAt(n), - }); - - block_entry_ops - .chain(blocks_by_number_ops) - .chain(leaf_ops) - .chain(stagnant_at_ops) - } -} - async fn run(mut ctx: Context, mut backend: B) where Context: SubsystemContext, From efc0963294be9de0b5eff7e43eb140cf6ada6b80 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 18:17:09 +0100 Subject: [PATCH 26/79] begin the hunt for viable leaves --- node/core/chain-selection/src/lib.rs | 84 ++++++++++++++++++++++++++-- 1 file changed, 78 insertions(+), 6 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index b318d5f9d64c..21dbf4c84b3f 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -27,6 +27,7 @@ use polkadot_subsystem::{ use parity_scale_codec::Error as CodecError; use futures::channel::oneshot; +use std::collections::HashMap; use std::time::{UNIX_EPOCH, SystemTime}; use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; @@ -515,6 
+516,46 @@ impl ViabilityUpdate { } } +// This is a set of block hashes which serve as the starting point in the +// search for new active leaves. The hashes in the values are children +// which are already known to be unviable. +type LeafSearchFrontier = HashMap>; + +fn search_for_viable_leaves( + backend: &mut OverlayedBackend, + frontier: LeafSearchFrontier, + viable_leaves: &mut LeafEntrySet, +) -> Result<(), Error> { + for (base, known_unviable_children) in frontier { + let base_entry = match backend.load_block_entry(&base)? { + None => { + // This means the block is finalized. We might reach this + // code path where a child of the finalized block becomes + // unviable. Each such child is the root of its own tree + // which, as an invariant, does not depend on the viability + // of the finalized block. So no siblings need to be inspected + // and we can ignore it safely. + continue + } + Some(e) => e, + }; + + let maybe_viable_children: Vec<_> = base_entry.children + .iter() + .filter(|c| !known_unviable_children.contains(c)) + .collect(); + + if maybe_viable_children.is_empty() { + viable_leaves.insert(base_entry.leaf_entry()); + } + + // Recurse into children. + unimplemented!() + } + + Ok(()) +} + // Propagate viability update to descendants of the given block. // // If the block entry provided is self-unviable, then it's assumed that an @@ -543,14 +584,21 @@ fn propagate_viability_update( let mut viable_leaves = backend.load_leaves()?; + let mut leaf_search_frontier = LeafSearchFrontier::new(); + // If the base block is itself partially unviable, // this will change to a `Some(base_hash)` after the first // invocation. let viability_update = ViabilityUpdate(None); - // Recursively apply update to tree - let mut frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; - while let Some((entry_ref, update)) = frontier.pop() { + // Recursively apply update to tree. + // + // As we go, we remove any blocks from the leaves which are no longer viable + // leaves. We also add blocks to the leaves-set which are obviously viable leaves. + // And we build up a frontier of blocks which may either be viable leaves or + // the ancestors of one. + let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; + while let Some((entry_ref, update)) = tree_frontier.pop() { let entry = match entry_ref { BlockEntryRef::Explicit(entry) => entry, BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? { @@ -569,15 +617,39 @@ fn propagate_viability_update( let (new_entry, children) = update.apply(entry); - backend.write_block_entry(new_entry); + if new_entry.viability.is_viable() { + // A block which is viable has a parent which is obviously not + // in the viable leaves set. + viable_leaves.remove(&new_entry.parent_hash); - // TODO [now]: figure out how to find new viable leaves. + // Furthermore, if the block is viable and has no children, + // it is viable by definition. + if new_entry.children.is_empty() { + viable_leaves.insert(new_entry.leaf_entry()); + } + } else { + // A block which is not viable is certainly not a viable leaf. + viable_leaves.remove(&new_entry.block_hash); + + // But if its parent is viable, then the parent is the root + // of a tree which contains a viable leaf. Save it for later + // so we can recurse into the other children. 
+ if new_entry.viability.is_parent_viable() { + leaf_search_frontier + .entry(new_entry.parent_hash) + .or_default() + .push(new_entry.block_hash); + } + } + + backend.write_block_entry(new_entry); - frontier.extend( + tree_frontier.extend( children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) ); } + search_for_viable_leaves(backend, leaf_search_frontier, &mut viable_leaves); backend.write_leaves(viable_leaves); Ok(()) From 5ec0064c8be1fd1afc42ec60a2ac7a5e3a03f769 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 18:41:15 +0100 Subject: [PATCH 27/79] viability pivots for updating the active leaves --- node/core/chain-selection/src/lib.rs | 104 +++++++++++++++------------ 1 file changed, 60 insertions(+), 44 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 21dbf4c84b3f..191c8ef456b1 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -521,41 +521,6 @@ impl ViabilityUpdate { // which are already known to be unviable. type LeafSearchFrontier = HashMap>; -fn search_for_viable_leaves( - backend: &mut OverlayedBackend, - frontier: LeafSearchFrontier, - viable_leaves: &mut LeafEntrySet, -) -> Result<(), Error> { - for (base, known_unviable_children) in frontier { - let base_entry = match backend.load_block_entry(&base)? { - None => { - // This means the block is finalized. We might reach this - // code path where a child of the finalized block becomes - // unviable. Each such child is the root of its own tree - // which, as an invariant, does not depend on the viability - // of the finalized block. So no siblings need to be inspected - // and we can ignore it safely. - continue - } - Some(e) => e, - }; - - let maybe_viable_children: Vec<_> = base_entry.children - .iter() - .filter(|c| !known_unviable_children.contains(c)) - .collect(); - - if maybe_viable_children.is_empty() { - viable_leaves.insert(base_entry.leaf_entry()); - } - - // Recurse into children. - unimplemented!() - } - - Ok(()) -} - // Propagate viability update to descendants of the given block. // // If the block entry provided is self-unviable, then it's assumed that an @@ -584,7 +549,13 @@ fn propagate_viability_update( let mut viable_leaves = backend.load_leaves()?; - let mut leaf_search_frontier = LeafSearchFrontier::new(); + // A mapping of Block Hash -> number + // Where the hash is the hash of a viable block which has + // at least 1 unviable child. + // + // The number is the number of known unviable children which is known + // as the pivot count. + let mut viability_pivots = HashMap::new(); // If the base block is itself partially unviable, // this will change to a `Some(base_hash)` after the first @@ -631,14 +602,11 @@ fn propagate_viability_update( // A block which is not viable is certainly not a viable leaf. viable_leaves.remove(&new_entry.block_hash); - // But if its parent is viable, then the parent is the root - // of a tree which contains a viable leaf. Save it for later - // so we can recurse into the other children. + // When the parent is viable but the entry itself is not, that means + // that the parent is a viability pivot. As we visit the children + // of a viability pivot, we build up an exhaustive pivot count. 
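			// [Editor's note - worked example, not part of the original patch.]
			// Suppose a viable parent P has children {A, B} and this update makes
			// A (and its subtree) unviable. Only A's subtree is traversed, so the
			// pivot map ends up as {P: 1}. P has two children, so the count does
			// not match `children.len()` and P is not inserted as a viable leaf;
			// B may still head a viable chain. Had A been P's only child, the
			// counts would match and P itself would become a viable leaf.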
if new_entry.viability.is_parent_viable() { - leaf_search_frontier - .entry(new_entry.parent_hash) - .or_default() - .push(new_entry.block_hash); + *viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1; } } @@ -649,7 +617,55 @@ fn propagate_viability_update( ); } - search_for_viable_leaves(backend, leaf_search_frontier, &mut viable_leaves); + // Revisit the viability pivots now that we've traversed the entire subtree. + // After this point, the viable leaves set is fully updated. A proof follows. + // + // If the base has become unviable, then we've iterated into all descendants, + // made them unviable and removed them from the set. We know that the parent is + // viable as this function is a no-op otherwise, so we need to see if the parent + // has other children or not. + // + // If the base has become viable, then we've iterated into all descendants, + // and found all blocks which are viable and have no children. We've already added + // those blocks to the leaf set, but what we haven't detected + // is blocks which are viable and have children, but all of the children are + // unviable. + // + // The solution of viability pivots addresses both of these: + // + // When the base has become unviable, the parent's viability is unchanged and therefore + // any leaves descending from parent but not base are still in the viable leaves set. + // If the parent has only one child which is the base, the parent is now a viable leaf. + // We've already visited the base in recursive search so the set of pivots should + // contain only a single entry `(parent, 1)`. qed. + // + // When the base has become viable, we've already iterated into every descendant + // of the base and thus have collected a set of pivots whose corresponding pivot + // counts have already been exhaustively computed from their children. qed. + for (pivot, pivot_count) in viability_pivots { + match backend.load_block_entry(&pivot)? { + None => { + // This means the block is finalized. We might reach this + // code path when the base is a child of the finalized block + // and has become unviable. + // + // Each such child is the root of its own tree + // which, as an invariant, does not depend on the viability + // of the finalized block. So no siblings need to be inspected + // and we can ignore it safely. + // + // Furthermore, if the set of viable leaves is empty, the + // finalized block is implicitly the viable leaf. + continue + } + Some(entry) => { + if entry.children.len() == pivot_count { + viable_leaves.insert(entry.leaf_entry()); + } + } + } + } + backend.write_leaves(viable_leaves); Ok(()) From 750b8d7644fc9d3a2c6e10205e872310f58156ef Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 18:41:59 +0100 Subject: [PATCH 28/79] remove LeafSearchFrontier --- node/core/chain-selection/src/lib.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 191c8ef456b1..6007e130de5a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -516,11 +516,6 @@ impl ViabilityUpdate { } } -// This is a set of block hashes which serve as the starting point in the -// search for new active leaves. The hashes in the values are children -// which are already known to be unviable. -type LeafSearchFrontier = HashMap>; - // Propagate viability update to descendants of the given block. 
// // If the block entry provided is self-unviable, then it's assumed that an From bb1de405259973147cb03f96ffb6b0fa6964c466 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 18:45:51 +0100 Subject: [PATCH 29/79] partially -> explicitly viable and untwist some booleans --- node/core/chain-selection/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 6007e130de5a..d644ab8f785a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -68,12 +68,12 @@ struct ViabilityCriteria { impl ViabilityCriteria { fn is_viable(&self) -> bool { - self.is_parent_viable() && self.is_partially_viable() + self.is_parent_viable() && self.is_explicitly_viable() } - // Whether the current block is partially viable. + // Whether the current block is explicitly viable. // That is, whether the current block is neither reverted nor stagnant. - fn is_partially_viable(&self) -> bool { + fn is_explicitly_viable(&self) -> bool { !self.explicitly_reverted && !self.approval.is_stagnant() } @@ -475,7 +475,7 @@ impl ViabilityUpdate { ) { // 1. When an ancestor has changed from unviable to viable, // we erase the `earliest_unviable_ancestor` of all descendants - // until encountering a partially unviable descendant D. + // until encountering a explicitly unviable descendant D. // // We then update the `earliest_unviable_ancestor` for all // descendants of D to be equal to D. @@ -499,7 +499,7 @@ impl ViabilityUpdate { let maybe_earliest_unviable = self.0; let next_earliest_unviable = { - if maybe_earliest_unviable.is_none() && !entry.viability.is_partially_viable() { + if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() { Some(entry.block_hash) } else { maybe_earliest_unviable @@ -552,7 +552,7 @@ fn propagate_viability_update( // as the pivot count. let mut viability_pivots = HashMap::new(); - // If the base block is itself partially unviable, + // If the base block is itself explicitly unviable, // this will change to a `Some(base_hash)` after the first // invocation. let viability_update = ViabilityUpdate(None); From 8f3c53321450b8865a66b60799df873508f73909 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 20:07:40 +0100 Subject: [PATCH 30/79] extract tree to submodule --- node/core/chain-selection/src/lib.rs | 406 +---------------------- node/core/chain-selection/src/tree.rs | 446 ++++++++++++++++++++++++++ 2 files changed, 449 insertions(+), 403 deletions(-) create mode 100644 node/core/chain-selection/src/tree.rs diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index d644ab8f785a..817d406376bc 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -16,7 +16,7 @@ //! Implements the Chain Selection Subsystem. -use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; +use polkadot_primitives::v1::{BlockNumber, Hash, Header}; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, @@ -33,6 +33,7 @@ use std::time::{UNIX_EPOCH, SystemTime}; use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; mod backend; +mod tree; const LOG_TARGET: &str = "parachain::chain-selection"; @@ -315,409 +316,8 @@ async fn handle_active_leaf( // for this, we want ascending order. 
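	// [Editor's note, not part of the original patch.] Ascending order matters
	// because importing a block looks up the parent's entry to inherit viability
	// and to append to its `children` list; importing oldest-first (hence the
	// `.rev()`, assuming `new_blocks` was collected newest-first) means the
	// parent entry already exists by the time each child is imported.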
for (hash, header) in new_blocks.into_iter().rev() { let weight = unimplemented!(); - import_block(&mut overlay, hash, header, weight)?; + crate::tree::import_block(&mut overlay, hash, header, weight)?; } Ok(overlay.into_write_ops().collect()) } - -fn import_block( - backend: &mut OverlayedBackend, - block_hash: Hash, - block_header: Header, - weight: Weight, -) -> Result<(), Error> { - import_block_ignoring_reversions(backend, block_hash, &block_header, weight)?; - apply_imported_block_reversions(backend, block_hash, &block_header)?; - - Ok(()) -} - -fn import_block_ignoring_reversions( - backend: &mut OverlayedBackend, - block_hash: Hash, - block_header: &Header, - weight: Weight, -) -> Result<(), Error> { - let parent_hash = block_header.parent_hash; - - let mut leaves = backend.load_leaves()?; - let parent_entry = backend.load_block_entry(&parent_hash)?; - - let inherited_viability = parent_entry.as_ref() - .and_then(|parent| parent.non_viable_ancestor_for_child()); - - // 1. Add the block to the DB assuming it's not reverted. - backend.write_block_entry( - BlockEntry { - block_hash, - parent_hash, - children: Vec::new(), - viability: ViabilityCriteria { - earliest_unviable_ancestor: inherited_viability, - explicitly_reverted: false, - approval: Approval::Unapproved, - }, - weight, - } - ); - - // 2. Update leaves if parent was a viable leaf or the parent is unknown. - if leaves.remove(&parent_hash) || parent_entry.is_none() { - leaves.insert(LeafEntry { block_hash, weight }); - backend.write_leaves(leaves); - } - - // 3. Update and write the parent - if let Some(mut parent_entry) = parent_entry { - parent_entry.children.push(block_hash); - backend.write_block_entry(parent_entry); - } - - // 4. Add to blocks-by-number. - let mut blocks_by_number = backend.load_blocks_by_number(block_header.number)?; - blocks_by_number.push(block_hash); - backend.write_blocks_by_number(block_header.number, blocks_by_number); - - // 5. Add stagnation timeout. - let stagnant_at = stagnant_timeout_from_now(); - let mut stagnant_at_list = backend.load_stagnant_at(stagnant_at)?; - stagnant_at_list.push(block_hash); - backend.write_stagnant_at(stagnant_at, stagnant_at_list); - - Ok(()) -} - -// Extract all reversion logs from a header in ascending order. -// -// Ignores logs with number >= the block header number. -fn extract_reversion_logs(header: &Header) -> Vec { - let number = header.number; - let mut logs = header.digest.logs() - .iter() - .enumerate() - .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { - Err(e) => { - tracing::warn!( - target: LOG_TARGET, - err = ?e, - index = i, - block_hash = ?header.hash(), - "Digest item failed to encode" - ); - - None - } - Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), - Ok(Some(ConsensusLog::Revert(b))) => { - tracing::warn!( - target: LOG_TARGET, - revert_target = b, - block_number = number, - block_hash = ?header.hash(), - "Block issued invalid revert digest targeting itself or future" - ); - - None - } - Ok(_) => None, - }) - .collect::>(); - - logs.sort(); - - logs -} - -// Load the given ancestor's block entry, in descending order from the `block_hash`. -// The ancestor_number must be at least one block less than the `block_number`. -// -// The returned entry will be `None` if the range is invalid or any block in the path had -// no entry present. If any block entry was missing, it can safely be assumed to -// be finalized. 
-fn load_ancestor( - backend: &mut OverlayedBackend, - block_hash: Hash, - block_number: BlockNumber, - ancestor_number: BlockNumber, -) -> Result, Error> { - if block_number <= ancestor_number { return Ok(None) } - - let mut current_hash = block_hash; - let mut current_entry = None; - - let segment_length = (block_number - ancestor_number) + 1; - for _ in std::iter::repeat(()).take(segment_length as usize) { - match backend.load_block_entry(¤t_hash)? { - None => return Ok(None), - Some(entry) => { - let parent_hash = entry.parent_hash; - current_entry = Some(entry); - current_hash = parent_hash; - } - } - } - - // Current entry should always be `Some` here. - Ok(current_entry) -} - -// A viability update to be applied to a block. -struct ViabilityUpdate(Option); - -impl ViabilityUpdate { - // Apply the viability update to a single block, yielding the updated - // block entry along with a vector of children and the updates to apply - // to them. - fn apply(self, mut entry: BlockEntry) -> ( - BlockEntry, - Vec<(Hash, ViabilityUpdate)> - ) { - // 1. When an ancestor has changed from unviable to viable, - // we erase the `earliest_unviable_ancestor` of all descendants - // until encountering a explicitly unviable descendant D. - // - // We then update the `earliest_unviable_ancestor` for all - // descendants of D to be equal to D. - // - // 2. When an ancestor A has changed from viable to unviable, - // we update the `earliest_unviable_ancestor` for all blocks - // to A. - // - // The following algorithm covers both cases. - // - // Furthermore, if there has been any change in viability, - // it is necessary to visit every single descendant of the root - // block. - // - // If a block B was unviable and is now viable, then every descendant - // has an `earliest_unviable_ancestor` which must be updated either - // to nothing or to the new earliest unviable ancestor. - // - // If a block B was viable and is now unviable, then every descendant - // has an `earliest_unviable_ancestor` which needs to be set to B. - - let maybe_earliest_unviable = self.0; - let next_earliest_unviable = { - if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() { - Some(entry.block_hash) - } else { - maybe_earliest_unviable - } - }; - entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable; - - let recurse = entry.children.iter() - .cloned() - .map(move |c| (c, ViabilityUpdate(next_earliest_unviable))) - .collect(); - - (entry, recurse) - } -} - -// Propagate viability update to descendants of the given block. -// -// If the block entry provided is self-unviable, then it's assumed that an -// unviability update needs to be propagated to descendants. -// -// If the block entry provided is self-viable, then it's assumed that a -// viability update needs to be propagated to descendants. -fn propagate_viability_update( - backend: &mut OverlayedBackend, - base: BlockEntry, -) -> Result<(), Error> { - enum BlockEntryRef { - Explicit(BlockEntry), - Hash(Hash), - } - - if !base.viability.is_parent_viable() { - // If the parent of the block is still unviable, - // then the `earliest_viable_ancestor` will not change - // regardless of the change in the block here. - // - // Furthermore, in such cases, the set of viable leaves - // does not change at all. - return Ok(()) - } - - let mut viable_leaves = backend.load_leaves()?; - - // A mapping of Block Hash -> number - // Where the hash is the hash of a viable block which has - // at least 1 unviable child. 
- // - // The number is the number of known unviable children which is known - // as the pivot count. - let mut viability_pivots = HashMap::new(); - - // If the base block is itself explicitly unviable, - // this will change to a `Some(base_hash)` after the first - // invocation. - let viability_update = ViabilityUpdate(None); - - // Recursively apply update to tree. - // - // As we go, we remove any blocks from the leaves which are no longer viable - // leaves. We also add blocks to the leaves-set which are obviously viable leaves. - // And we build up a frontier of blocks which may either be viable leaves or - // the ancestors of one. - let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; - while let Some((entry_ref, update)) = tree_frontier.pop() { - let entry = match entry_ref { - BlockEntryRef::Explicit(entry) => entry, - BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? { - None => { - tracing::warn!( - target: LOG_TARGET, - block_hash = ?hash, - "Missing expected block entry" - ); - - continue; - } - Some(entry) => entry, - } - }; - - let (new_entry, children) = update.apply(entry); - - if new_entry.viability.is_viable() { - // A block which is viable has a parent which is obviously not - // in the viable leaves set. - viable_leaves.remove(&new_entry.parent_hash); - - // Furthermore, if the block is viable and has no children, - // it is viable by definition. - if new_entry.children.is_empty() { - viable_leaves.insert(new_entry.leaf_entry()); - } - } else { - // A block which is not viable is certainly not a viable leaf. - viable_leaves.remove(&new_entry.block_hash); - - // When the parent is viable but the entry itself is not, that means - // that the parent is a viability pivot. As we visit the children - // of a viability pivot, we build up an exhaustive pivot count. - if new_entry.viability.is_parent_viable() { - *viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1; - } - } - - backend.write_block_entry(new_entry); - - tree_frontier.extend( - children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) - ); - } - - // Revisit the viability pivots now that we've traversed the entire subtree. - // After this point, the viable leaves set is fully updated. A proof follows. - // - // If the base has become unviable, then we've iterated into all descendants, - // made them unviable and removed them from the set. We know that the parent is - // viable as this function is a no-op otherwise, so we need to see if the parent - // has other children or not. - // - // If the base has become viable, then we've iterated into all descendants, - // and found all blocks which are viable and have no children. We've already added - // those blocks to the leaf set, but what we haven't detected - // is blocks which are viable and have children, but all of the children are - // unviable. - // - // The solution of viability pivots addresses both of these: - // - // When the base has become unviable, the parent's viability is unchanged and therefore - // any leaves descending from parent but not base are still in the viable leaves set. - // If the parent has only one child which is the base, the parent is now a viable leaf. - // We've already visited the base in recursive search so the set of pivots should - // contain only a single entry `(parent, 1)`. qed. 
- // - // When the base has become viable, we've already iterated into every descendant - // of the base and thus have collected a set of pivots whose corresponding pivot - // counts have already been exhaustively computed from their children. qed. - for (pivot, pivot_count) in viability_pivots { - match backend.load_block_entry(&pivot)? { - None => { - // This means the block is finalized. We might reach this - // code path when the base is a child of the finalized block - // and has become unviable. - // - // Each such child is the root of its own tree - // which, as an invariant, does not depend on the viability - // of the finalized block. So no siblings need to be inspected - // and we can ignore it safely. - // - // Furthermore, if the set of viable leaves is empty, the - // finalized block is implicitly the viable leaf. - continue - } - Some(entry) => { - if entry.children.len() == pivot_count { - viable_leaves.insert(entry.leaf_entry()); - } - } - } - } - - backend.write_leaves(viable_leaves); - - Ok(()) -} - -// Assuming that a block is already imported, scans the header of the block -// for revert signals and applies those to relevant ancestors, and recursively -// updates the viability of those ancestors' descendants. -fn apply_imported_block_reversions( - backend: &mut OverlayedBackend, - block_hash: Hash, - block_header: &Header, -) -> Result<(), Error> { - let logs = extract_reversion_logs(&block_header); - - // Note: since revert numbers are returned from `extract_reversion_logs` - // in ascending order, the expensive propagation of unviability is - // only heavy on the first log. - for revert_number in logs { - let mut ancestor_entry = match load_ancestor( - backend, - block_hash, - block_header.number, - revert_number, - )? { - None => { - tracing::warn!( - target: LOG_TARGET, - ?block_hash, - block_number = block_header.number, - revert_target = revert_number, - "The hammer has dropped. \ - A block has indicated that its finalized ancestor be reverted. \ - Please inform an adult.", - ); - - continue - } - Some(ancestor_entry) => { - tracing::info!( - target: LOG_TARGET, - ?block_hash, - block_number = block_header.number, - revert_target = revert_number, - revert_hash = ?ancestor_entry.block_hash, - "A block has signaled that its ancestor be reverted due to a bad parachain block.", - ); - - ancestor_entry - } - }; - - ancestor_entry.viability.explicitly_reverted = true; - backend.write_block_entry(ancestor_entry.clone()); - - propagate_viability_update(backend, ancestor_entry)?; - } - - Ok(()) -} diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs new file mode 100644 index 000000000000..5b059f468b5e --- /dev/null +++ b/node/core/chain-selection/src/tree.rs @@ -0,0 +1,446 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! 
Implements the tree-view over the data backend which we use to determine +//! viable leaves. +//! +//! The metadata is structured as a tree, with the root implicitly being the +//! finalized block, which is not stored as part of the tree. +//! +//! Each direct descendant of the finalized block acts as its own sub-tree, +//! and as the finalized block advances, orphaned sub-trees are entirely pruned. + +use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; + +use std::collections::HashMap; + +use super::{ + LOG_TARGET, + Approval, BlockEntry, Error, LeafEntry, LeafEntrySet, ViabilityCriteria, Weight, +}; +use crate::backend::{Backend, OverlayedBackend}; + +// A viability update to be applied to a block. +struct ViabilityUpdate(Option); + +impl ViabilityUpdate { + // Apply the viability update to a single block, yielding the updated + // block entry along with a vector of children and the updates to apply + // to them. + fn apply(self, mut entry: BlockEntry) -> ( + BlockEntry, + Vec<(Hash, ViabilityUpdate)> + ) { + // 1. When an ancestor has changed from unviable to viable, + // we erase the `earliest_unviable_ancestor` of all descendants + // until encountering a explicitly unviable descendant D. + // + // We then update the `earliest_unviable_ancestor` for all + // descendants of D to be equal to D. + // + // 2. When an ancestor A has changed from viable to unviable, + // we update the `earliest_unviable_ancestor` for all blocks + // to A. + // + // The following algorithm covers both cases. + // + // Furthermore, if there has been any change in viability, + // it is necessary to visit every single descendant of the root + // block. + // + // If a block B was unviable and is now viable, then every descendant + // has an `earliest_unviable_ancestor` which must be updated either + // to nothing or to the new earliest unviable ancestor. + // + // If a block B was viable and is now unviable, then every descendant + // has an `earliest_unviable_ancestor` which needs to be set to B. + + let maybe_earliest_unviable = self.0; + let next_earliest_unviable = { + if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() { + Some(entry.block_hash) + } else { + maybe_earliest_unviable + } + }; + entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable; + + let recurse = entry.children.iter() + .cloned() + .map(move |c| (c, ViabilityUpdate(next_earliest_unviable))) + .collect(); + + (entry, recurse) + } +} + +// Propagate viability update to descendants of the given block. +// +// If the block entry provided is self-unviable, then it's assumed that an +// unviability update needs to be propagated to descendants. +// +// If the block entry provided is self-viable, then it's assumed that a +// viability update needs to be propagated to descendants. +fn propagate_viability_update( + backend: &mut OverlayedBackend, + base: BlockEntry, +) -> Result<(), Error> { + enum BlockEntryRef { + Explicit(BlockEntry), + Hash(Hash), + } + + if !base.viability.is_parent_viable() { + // If the parent of the block is still unviable, + // then the `earliest_viable_ancestor` will not change + // regardless of the change in the block here. + // + // Furthermore, in such cases, the set of viable leaves + // does not change at all. + return Ok(()) + } + + let mut viable_leaves = backend.load_leaves()?; + + // A mapping of Block Hash -> number + // Where the hash is the hash of a viable block which has + // at least 1 unviable child. 
+ // + // The number is the number of known unviable children which is known + // as the pivot count. + let mut viability_pivots = HashMap::new(); + + // If the base block is itself explicitly unviable, + // this will change to a `Some(base_hash)` after the first + // invocation. + let viability_update = ViabilityUpdate(None); + + // Recursively apply update to tree. + // + // As we go, we remove any blocks from the leaves which are no longer viable + // leaves. We also add blocks to the leaves-set which are obviously viable leaves. + // And we build up a frontier of blocks which may either be viable leaves or + // the ancestors of one. + let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; + while let Some((entry_ref, update)) = tree_frontier.pop() { + let entry = match entry_ref { + BlockEntryRef::Explicit(entry) => entry, + BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? { + None => { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Missing expected block entry" + ); + + continue; + } + Some(entry) => entry, + } + }; + + let (new_entry, children) = update.apply(entry); + + if new_entry.viability.is_viable() { + // A block which is viable has a parent which is obviously not + // in the viable leaves set. + viable_leaves.remove(&new_entry.parent_hash); + + // Furthermore, if the block is viable and has no children, + // it is viable by definition. + if new_entry.children.is_empty() { + viable_leaves.insert(new_entry.leaf_entry()); + } + } else { + // A block which is not viable is certainly not a viable leaf. + viable_leaves.remove(&new_entry.block_hash); + + // When the parent is viable but the entry itself is not, that means + // that the parent is a viability pivot. As we visit the children + // of a viability pivot, we build up an exhaustive pivot count. + if new_entry.viability.is_parent_viable() { + *viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1; + } + } + + backend.write_block_entry(new_entry); + + tree_frontier.extend( + children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) + ); + } + + // Revisit the viability pivots now that we've traversed the entire subtree. + // After this point, the viable leaves set is fully updated. A proof follows. + // + // If the base has become unviable, then we've iterated into all descendants, + // made them unviable and removed them from the set. We know that the parent is + // viable as this function is a no-op otherwise, so we need to see if the parent + // has other children or not. + // + // If the base has become viable, then we've iterated into all descendants, + // and found all blocks which are viable and have no children. We've already added + // those blocks to the leaf set, but what we haven't detected + // is blocks which are viable and have children, but all of the children are + // unviable. + // + // The solution of viability pivots addresses both of these: + // + // When the base has become unviable, the parent's viability is unchanged and therefore + // any leaves descending from parent but not base are still in the viable leaves set. + // If the parent has only one child which is the base, the parent is now a viable leaf. + // We've already visited the base in recursive search so the set of pivots should + // contain only a single entry `(parent, 1)`. qed. 
+ // + // When the base has become viable, we've already iterated into every descendant + // of the base and thus have collected a set of pivots whose corresponding pivot + // counts have already been exhaustively computed from their children. qed. + for (pivot, pivot_count) in viability_pivots { + match backend.load_block_entry(&pivot)? { + None => { + // This means the block is finalized. We might reach this + // code path when the base is a child of the finalized block + // and has become unviable. + // + // Each such child is the root of its own tree + // which, as an invariant, does not depend on the viability + // of the finalized block. So no siblings need to be inspected + // and we can ignore it safely. + // + // Furthermore, if the set of viable leaves is empty, the + // finalized block is implicitly the viable leaf. + continue + } + Some(entry) => { + if entry.children.len() == pivot_count { + viable_leaves.insert(entry.leaf_entry()); + } + } + } + } + + backend.write_leaves(viable_leaves); + + Ok(()) +} + +/// Imports a new block and applies any reversions to ancestors. +pub(crate) fn import_block( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_header: Header, + weight: Weight, +) -> Result<(), Error> { + let logs = extract_reversion_logs(&block_header); + + add_block(backend, block_hash, &block_header, weight)?; + apply_reversions( + backend, + block_hash, + block_header.number, + logs, + )?; + + Ok(()) +} + +// Extract all reversion logs from a header in ascending order. +// +// Ignores logs with number >= the block header number. +fn extract_reversion_logs(header: &Header) -> Vec { + let number = header.number; + let mut logs = header.digest.logs() + .iter() + .enumerate() + .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + index = i, + block_hash = ?header.hash(), + "Digest item failed to encode" + ); + + None + } + Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) => { + tracing::warn!( + target: LOG_TARGET, + revert_target = b, + block_number = number, + block_hash = ?header.hash(), + "Block issued invalid revert digest targeting itself or future" + ); + + None + } + Ok(_) => None, + }) + .collect::>(); + + logs.sort(); + + logs +} + +// Load the given ancestor's block entry, in descending order from the `block_hash`. +// The ancestor_number must be at least one block less than the `block_number`. +// +// The returned entry will be `None` if the range is invalid or any block in the path had +// no entry present. If any block entry was missing, it can safely be assumed to +// be finalized. +fn load_ancestor( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_number: BlockNumber, + ancestor_number: BlockNumber, +) -> Result, Error> { + if block_number <= ancestor_number { return Ok(None) } + + let mut current_hash = block_hash; + let mut current_entry = None; + + let segment_length = (block_number - ancestor_number) + 1; + for _ in std::iter::repeat(()).take(segment_length as usize) { + match backend.load_block_entry(¤t_hash)? { + None => return Ok(None), + Some(entry) => { + let parent_hash = entry.parent_hash; + current_entry = Some(entry); + current_hash = parent_hash; + } + } + } + + // Current entry should always be `Some` here. + Ok(current_entry) +} + +// Add a new block to the tree, which is assumed to be unreverted and unapproved, +// but not stagnant. It inherits viability from its parent, if any. 
+// +// This updates the parent entry, if any, and updates the viable leaves set accordingly. +// This also schedules a stagnation-check update and adds the block to the blocks-by-number +// mapping. +fn add_block( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_header: &Header, + weight: Weight, +) -> Result<(), Error> { + let parent_hash = block_header.parent_hash; + + let mut leaves = backend.load_leaves()?; + let parent_entry = backend.load_block_entry(&parent_hash)?; + + let inherited_viability = parent_entry.as_ref() + .and_then(|parent| parent.non_viable_ancestor_for_child()); + + // 1. Add the block to the DB assuming it's not reverted. + backend.write_block_entry( + BlockEntry { + block_hash, + parent_hash, + children: Vec::new(), + viability: ViabilityCriteria { + earliest_unviable_ancestor: inherited_viability, + explicitly_reverted: false, + approval: Approval::Unapproved, + }, + weight, + } + ); + + // 2. Update leaves if parent was a viable leaf or the parent is unknown. + if leaves.remove(&parent_hash) || parent_entry.is_none() { + leaves.insert(LeafEntry { block_hash, weight }); + backend.write_leaves(leaves); + } + + // 3. Update and write the parent + if let Some(mut parent_entry) = parent_entry { + parent_entry.children.push(block_hash); + backend.write_block_entry(parent_entry); + } + + // 4. Add to blocks-by-number. + let mut blocks_by_number = backend.load_blocks_by_number(block_header.number)?; + blocks_by_number.push(block_hash); + backend.write_blocks_by_number(block_header.number, blocks_by_number); + + // 5. Add stagnation timeout. + let stagnant_at = crate::stagnant_timeout_from_now(); + let mut stagnant_at_list = backend.load_stagnant_at(stagnant_at)?; + stagnant_at_list.push(block_hash); + backend.write_stagnant_at(stagnant_at, stagnant_at_list); + + Ok(()) +} + +// Assuming that a block is already imported, accepts the number of the block +// as well as a list of reversions triggered by the block in ascending order. +fn apply_reversions( + backend: &mut OverlayedBackend, + block_hash: Hash, + block_number: BlockNumber, + reversions: Vec, +) -> Result<(), Error> { + // Note: since revert numbers are in ascending order, the expensive propagation + // of unviability is only heavy on the first log. + for revert_number in reversions { + let mut ancestor_entry = match load_ancestor( + backend, + block_hash, + block_number, + revert_number, + )? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?block_hash, + block_number, + revert_target = revert_number, + "The hammer has dropped. \ + A block has indicated that its finalized ancestor be reverted. 
\ + Please inform an adult.", + ); + + continue + } + Some(ancestor_entry) => { + tracing::info!( + target: LOG_TARGET, + ?block_hash, + block_number, + revert_target = revert_number, + revert_hash = ?ancestor_entry.block_hash, + "A block has signaled that its ancestor be reverted due to a bad parachain block.", + ); + + ancestor_entry + } + }; + + ancestor_entry.viability.explicitly_reverted = true; + backend.write_block_entry(ancestor_entry.clone()); + + propagate_viability_update(backend, ancestor_entry)?; + } + + Ok(()) +} From ec481186fd47d2a435a4dbee74956ed8a419371f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:04:13 +0100 Subject: [PATCH 31/79] implement block finality update --- node/core/chain-selection/src/backend.rs | 6 +- node/core/chain-selection/src/tree.rs | 102 ++++++++++++++++++++++- 2 files changed, 104 insertions(+), 4 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index e8531df962a9..ab67ec5312b6 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -127,7 +127,11 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { } pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { - self.blocks_by_number.insert(number, Some(blocks)); + if blocks.is_empty() { + self.blocks_by_number.insert(number, None); + } else { + self.blocks_by_number.insert(number, Some(blocks)); + } } pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) { diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 5b059f468b5e..f96cdc2a55d2 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -87,7 +87,8 @@ impl ViabilityUpdate { } } -// Propagate viability update to descendants of the given block. +// Propagate viability update to descendants of the given block. This writes +// the `base` entry as well as all descendants. // // If the block entry provided is self-unviable, then it's assumed that an // unviability update needs to be propagated to descendants. @@ -437,10 +438,105 @@ fn apply_reversions( }; ancestor_entry.viability.explicitly_reverted = true; - backend.write_block_entry(ancestor_entry.clone()); - propagate_viability_update(backend, ancestor_entry)?; } Ok(()) } + +/// Finalize a block with the given number and hash. +/// +/// This will prune all sub-trees not descending from the given block, +/// all block entries at or before the given height, +/// and will update the viability of all sub-trees descending from the given +/// block if the finalized block was not viable. +/// +/// This is assumed to start with a fresh backend, and will produce +/// an overlay over the backend with all the changes applied. +pub(super) fn finalize_block<'a, B: Backend + 'a>( + backend: &'a B, + finalized_hash: Hash, + finalized_number: BlockNumber, +) -> Result, Error> { + let earliest_stored_number = backend.load_first_block_number()?; + let mut backend = OverlayedBackend::new(backend); + + let earliest_stored_number = match earliest_stored_number { + None => { + // This implies that there are no unfinalized blocks and hence nothing + // to update. + return Ok(backend); + } + Some(e) => e, + }; + + // Walk all numbers up to the finalized number and remove those entries. 
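+ // The range excludes the finalized number itself; blocks at the finalized
+ // height are handled separately below.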
+ for number in earliest_stored_number..finalized_number { + let blocks_at = backend.load_blocks_by_number(number)?; + backend.delete_blocks_by_number(number); + + for block in blocks_at { + backend.delete_block_entry(&block); + } + } + + // Remove all blocks at the finalized height, with the exception of the finalized block, + // and their descendants, recursively. + { + let blocks_at_finalized_height = backend.load_blocks_by_number(finalized_number)?; + backend.delete_blocks_by_number(finalized_number); + + let mut frontier: Vec<_> = blocks_at_finalized_height + .into_iter() + .filter(|h| h != &finalized_hash) + .map(|h| (h, finalized_number)) + .collect(); + + while let Some((dead_hash, dead_number)) = frontier.pop() { + let entry = backend.load_block_entry(&dead_hash)?; + backend.delete_block_entry(&dead_hash); + + // This does a few extra `clone`s but is unlikely to be + // a bottleneck. Code complexity is very low as a result. + let mut blocks_at_height = backend.load_blocks_by_number(dead_number)?; + blocks_at_height.retain(|h| h != &dead_hash); + backend.write_blocks_by_number(dead_number, blocks_at_height); + + // Add all children to the frontier. + let next_height = dead_number + 1; + frontier.extend( + entry.into_iter().flat_map(|e| e.children).map(|h| (h, next_height)) + ); + } + } + + // Visit and remove the finalized block, fetching its children. + let children_of_finalized = { + let finalized_entry = backend.load_block_entry(&finalized_hash)?; + backend.delete_block_entry(&finalized_hash); + + finalized_entry.into_iter().flat_map(|e| e.children) + }; + + // Update the viability of each child. + for child in children_of_finalized { + if let Some(mut child) = backend.load_block_entry(&child)? { + // Finalized blocks are always viable. + child.viability.earliest_unviable_ancestor = None; + + propagate_viability_update(&mut backend, child)?; + } else { + tracing::warn!( + target: LOG_TARGET, + ?finalized_hash, + finalized_number, + child_hash = ?child, + "Missing child of finalized block", + ); + + // No need to do anything, but this is an inconsistent state. + } + } + + Ok(backend) +} From 82c1817d7a58e042fdefda2841636dd3244d6151 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:18:29 +0100 Subject: [PATCH 32/79] Implement block approval routine --- node/core/chain-selection/src/tree.rs | 33 ++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index f96cdc2a55d2..a110ae385258 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -526,7 +526,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( propagate_viability_update(&mut backend, child)?; } else { - tracing::warn!( + tracing::debug!( target: LOG_TARGET, ?finalized_hash, finalized_number, @@ -540,3 +540,34 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( Ok(backend) } + +/// Mark a block as approved and update the viability of itself and its +/// descendants accordingly. +pub(super) fn approve_block( + backend: &mut OverlayedBackend, + approved_hash: Hash, +) -> Result<(), Error> { + if let Some(mut entry) = backend.load_block_entry(&approved_hash)? { + let was_viable = entry.viability.is_viable(); + entry.viability.approval = Approval::Approved; + let is_viable = entry.viability.is_viable(); + + // Approval can change the viability in only one direction. 
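+ // A block that was already viable cannot become unviable by being approved.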
+ // If the viability has changed, then we propagate that to children + // and recalculate the viable leaf set. + if !was_viable && is_viable { + propagate_viability_update(backend, entry)?; + } else { + backend.write_block_entry(entry); + } + + } else { + tracing::debug!( + target: LOG_TARGET, + block_hash = ?approved_hash, + "Missing entry for freshly-approved block. Ignoring" + ); + } + + Ok(()) +} From 6f2edf3e1967f34bddc8b8c37c9558cad171ff2d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:27:32 +0100 Subject: [PATCH 33/79] implement stagnant detection --- node/core/chain-selection/src/backend.rs | 3 +- node/core/chain-selection/src/tree.rs | 45 +++++++++++++++++++++++- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index ab67ec5312b6..88a8dc15d7c8 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -45,7 +45,8 @@ pub(super) trait Backend { fn load_leaves(&self) -> Result; /// Load the stagnant list at the given timestamp. fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; - /// Load all stagnant lists up to and including the given unix timestamp. + /// Load all stagnant lists up to and including the given unix timestamp + /// in ascending order. fn load_stagnant_at_up_to(&self, up_to: Timestamp) -> Result)>, Error>; /// Load the earliest kept block number. diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index a110ae385258..9cc88071dea5 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -30,6 +30,7 @@ use std::collections::HashMap; use super::{ LOG_TARGET, Approval, BlockEntry, Error, LeafEntry, LeafEntrySet, ViabilityCriteria, Weight, + Timestamp, }; use crate::backend::{Backend, OverlayedBackend}; @@ -88,7 +89,8 @@ impl ViabilityUpdate { } // Propagate viability update to descendants of the given block. This writes -// the `base` entry as well as all descendants. +// the `base` entry as well as all descendants. If the parent of the block +// entry is not viable, this wlil not affect any descendants. // // If the block entry provided is self-unviable, then it's assumed that an // unviability update needs to be propagated to descendants. @@ -111,6 +113,7 @@ fn propagate_viability_update( // // Furthermore, in such cases, the set of viable leaves // does not change at all. + backend.write_block_entry(base); return Ok(()) } @@ -476,6 +479,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( backend.delete_blocks_by_number(number); for block in blocks_at { + // TODO [now]: remove from viable leaves. backend.delete_block_entry(&block); } } @@ -495,6 +499,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( while let Some((dead_hash, dead_number)) = frontier.pop() { let entry = backend.load_block_entry(&dead_hash)?; backend.delete_block_entry(&dead_hash); + // TODO [now]: remove from viable leaves. // This does a few extra `clone`s but is unlikely to be // a bottleneck. Code complexity is very low as a result. @@ -514,6 +519,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( let children_of_finalized = { let finalized_entry = backend.load_block_entry(&finalized_hash)?; backend.delete_block_entry(&finalized_hash); + // TODO [now]: remove from viable leaves. 
finalized_entry.into_iter().flat_map(|e| e.children) }; @@ -571,3 +577,40 @@ pub(super) fn approve_block( Ok(()) } + +/// Check whether any blocks up to the given timestamp are stagnant and update +/// accordingly. +/// +/// This accepts a fresh backend and returns an overlay on top of it representing +/// all changes made. +pub(super) fn detect_stagnant<'a, B: 'a + Backend>( + backend: &'a B, + up_to: Timestamp, +) -> Result, Error> { + let stagnant_up_to = backend.load_stagnant_at_up_to(up_to)?; + let mut backend = OverlayedBackend::new(backend); + + // As this is in ascending order, only the earliest stagnant + // blocks will involve heavy viability propagations. + for (timestamp, maybe_stagnant) in stagnant_up_to { + backend.delete_stagnant_at(timestamp); + + for block_hash in maybe_stagnant { + if let Some(mut entry) = backend.load_block_entry(&block_hash)? { + let was_viable = entry.viability.is_viable(); + if let Approval::Unapproved = entry.viability.approval { + entry.viability.approval = Approval::Stagnant; + } + let is_viable = entry.viability.is_viable(); + + if was_viable && !is_viable { + propagate_viability_update(&mut backend, entry)?; + } else { + backend.write_block_entry(entry); + } + } + } + } + + Ok(backend) +} From a1d91692d16419eaabe232349ba53c84e02c65b9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:30:04 +0100 Subject: [PATCH 34/79] ensure blocks pruned on finality are removed from the active leaves set --- node/core/chain-selection/src/tree.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 9cc88071dea5..4c4eace50101 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -473,13 +473,15 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( Some(e) => e, }; + let mut viable_leaves = backend.load_leaves()?; + // Walk all numbers up to the finalized number and remove those entries. for number in earliest_stored_number..finalized_number { let blocks_at = backend.load_blocks_by_number(number)?; backend.delete_blocks_by_number(number); for block in blocks_at { - // TODO [now]: remove from viable leaves. + viable_leaves.remove(&block); backend.delete_block_entry(&block); } } @@ -499,7 +501,7 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( while let Some((dead_hash, dead_number)) = frontier.pop() { let entry = backend.load_block_entry(&dead_hash)?; backend.delete_block_entry(&dead_hash); - // TODO [now]: remove from viable leaves. + viable_leaves.remove(&dead_hash); // This does a few extra `clone`s but is unlikely to be // a bottleneck. Code complexity is very low as a result. @@ -519,11 +521,13 @@ pub(super) fn finalize_block<'a, B: Backend + 'a>( let children_of_finalized = { let finalized_entry = backend.load_block_entry(&finalized_hash)?; backend.delete_block_entry(&finalized_hash); - // TODO [now]: remove from viable leaves. + viable_leaves.remove(&finalized_hash); finalized_entry.into_iter().flat_map(|e| e.children) }; + backend.write_leaves(viable_leaves); + // Update the viability of each child. for child in children_of_finalized { if let Some(mut child) = backend.load_block_entry(&child)? 
{ From a969a2b3e4b3f7e53d781fb26758472127915001 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:34:15 +0100 Subject: [PATCH 35/79] write down some planned test cases --- node/core/chain-selection/src/tree.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 4c4eace50101..582233d9f974 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -618,3 +618,18 @@ pub(super) fn detect_stagnant<'a, B: 'a + Backend>( Ok(backend) } + +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]: importing a block without reversion + // TODO [now]: importing a block with reversion + + // TODO [now]: finalize a viable block + // TODO [now]: finalize an unviable block with viable descendants + // TODO [now]: finalize an unviable block with unviable descendants down the line + + // TODO [now]: mark blocks as stagnant. + // TODO [now]: approve stagnant block with unviable descendant. +} From 6ed7ac4a15545e5e9da349ff56e12203f201778f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 22:49:41 +0100 Subject: [PATCH 36/79] floww --- node/core/chain-selection/src/backend.rs | 3 +- node/core/chain-selection/src/lib.rs | 41 +++++++++++++++++++++--- 2 files changed, 38 insertions(+), 6 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index 88a8dc15d7c8..9a0264ba91df 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -55,7 +55,8 @@ pub(super) trait Backend { fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error>; /// Atomically write the list of operations, with later operations taking precedence over prior. - fn write(&mut self, ops: Vec) -> Result<(), Error>; + fn write(&mut self, ops: I) -> Result<(), Error> + where I: IntoIterator; } /// An in-memory overlay over the backend. diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 817d406376bc..08093b3ac77a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -248,13 +248,29 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) return Ok(()) } FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { - unimplemented!() + for leaf in update.activated { + let write_ops = handle_active_leaf( + ctx, + &*backend, + leaf.hash, + ).await?; + + backend.write(write_ops)?; + } } - FromOverseer::Signal(OverseerSignal::BlockFinalized(_, _)) => { - unimplemented!() + FromOverseer::Signal(OverseerSignal::BlockFinalized(h, n)) => { + handle_finalized_block(backend, h, n)? } - FromOverseer::Communication { msg } => { - unimplemented!() + FromOverseer::Communication { msg } => match msg { + ChainSelectionMessage::Approved(hash) => { + unimplemented!() + } + ChainSelectionMessage::Leaves(tx) => { + unimplemented!() + } + ChainSelectionMessage::BestLeafContaining(required, tx) => { + unimplemented!() + } } }; } @@ -321,3 +337,18 @@ async fn handle_active_leaf( Ok(overlay.into_write_ops().collect()) } + +// Handle a finalized block event. 
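+// This prunes all sub-trees not descending from the finalized block, along with
+// all block entries at or before the finalized height, and updates the viability
+// of the finalized block's descendants.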
+fn handle_finalized_block( + backend: &mut impl Backend, + finalized_hash: Hash, + finalized_number: BlockNumber, +) -> Result<(), Error> { + let ops = crate::tree::finalize_block( + &*backend, + finalized_hash, + finalized_number, + )?.into_write_ops(); + + backend.write(ops) +} From 0577ed69d0086401b0a48433b46ae785fb077e70 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 23:02:56 +0100 Subject: [PATCH 37/79] leaf loading --- node/core/chain-selection/src/lib.rs | 49 +++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 08093b3ac77a..f00f13c4f226 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -119,6 +119,10 @@ impl LeafEntrySet { } } } + + fn into_hashes_descending(self) -> Vec { + self.inner.into_iter().map(|e| e.block_hash).collect() + } } #[derive(Debug, Clone)] @@ -263,10 +267,11 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) } FromOverseer::Communication { msg } => match msg { ChainSelectionMessage::Approved(hash) => { - unimplemented!() + handle_approved_block(backend, hash)? } ChainSelectionMessage::Leaves(tx) => { - unimplemented!() + let leaves = load_leaves(ctx, &*backend).await?; + let _ = tx.send(leaves); } ChainSelectionMessage::BestLeafContaining(required, tx) => { unimplemented!() @@ -276,9 +281,9 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) } } -async fn fetch_finalized_number( +async fn fetch_finalized( ctx: &mut impl SubsystemContext, -) -> Result { +) -> Result<(Hash, BlockNumber), Error> { unimplemented!() } @@ -310,7 +315,7 @@ async fn handle_active_leaf( ) -> Result, Error> { let lower_bound = match backend.load_first_block_number()? { Some(l) => l, - None => fetch_finalized_number(ctx).await?, + None => fetch_finalized(ctx).await?.1, }; let header = match fetch_header(ctx, hash).await? { @@ -352,3 +357,37 @@ fn handle_finalized_block( backend.write(ops) } + +// Handle an approved block event. +fn handle_approved_block( + backend: &mut impl Backend, + approved_block: Hash, +) -> Result<(), Error> { + let ops = { + let mut overlay = OverlayedBackend::new(&*backend); + + crate::tree::approve_block( + &mut overlay, + approved_block, + )?; + + overlay.into_write_ops() + }; + + backend.write(ops) +} + +// Load the leaves from the backend. If there are no leaves, then return +// the finalized block. 
+async fn load_leaves( + ctx: &mut impl SubsystemContext, + backend: &impl Backend, +) -> Result, Error> { + let leaves = backend.load_leaves()?.into_hashes_descending(); + if leaves.is_empty() { + let finalized_hash = fetch_finalized(ctx).await?.0; + Ok(vec![finalized_hash]) + } else { + Ok(leaves) + } +} From c10d52c7447699e70302edc55bdba6cd6bf15f2e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 23:15:38 +0100 Subject: [PATCH 38/79] implement best_leaf_containing --- node/core/chain-selection/src/backend.rs | 48 ++++++++++++++++++++++++ node/core/chain-selection/src/lib.rs | 17 +++++++-- 2 files changed, 61 insertions(+), 4 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index 9a0264ba91df..44bc5858df05 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -178,3 +178,51 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { .chain(stagnant_at_ops) } } + +/// Attempt to find the given ancestor in the chain with given head. +/// +/// If the ancestor is the most recently finalized block, and the `head` is +/// a known unfinalized block, this will return `true`. +/// +/// If the ancestor is an unfinalized block and `head` is known, this will +/// return true if `ancestor` is in `head`'s chain. +/// +/// If the ancestor is an older finalized block, this will return `false`. +fn contains_ancestor( + backend: &impl Backend, + head: Hash, + ancestor: Hash, +) -> Result { + let mut current_hash = head; + loop { + if current_hash == ancestor { return Ok(true) } + match backend.load_block_entry(¤t_hash)? { + Some(e) => { current_hash = e.parent_hash } + None => break + } + } + + Ok(false) +} + +/// This returns the best unfinalized leaf containing the required block. +/// +/// If the required block is finalized but not the most recent finalized block, +/// this will return `None`. +/// +/// If the required block is unfinalized but not an ancestor of any viable leaf, +/// this will return `None`. +pub(super) fn find_best_leaf_containing( + backend: &impl Backend, + required: Hash, +) -> Result, Error> { + let leaves = backend.load_leaves()?; + for leaf in leaves.into_hashes_descending() { + if contains_ancestor(backend, leaf, required)? { + return Ok(Some(leaf)) + } + } + + // If there are no viable leaves containing the ancestor + Ok(None) +} diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index f00f13c4f226..517607d26bc5 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -120,8 +120,8 @@ impl LeafEntrySet { } } - fn into_hashes_descending(self) -> Vec { - self.inner.into_iter().map(|e| e.block_hash).collect() + fn into_hashes_descending(self) -> impl IntoIterator { + self.inner.into_iter().map(|e| e.block_hash) } } @@ -274,7 +274,12 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) let _ = tx.send(leaves); } ChainSelectionMessage::BestLeafContaining(required, tx) => { - unimplemented!() + let best_containing = crate::backend::find_best_leaf_containing( + &*backend, + required, + )?; + + let _ = tx.send(best_containing); } } }; @@ -383,7 +388,11 @@ async fn load_leaves( ctx: &mut impl SubsystemContext, backend: &impl Backend, ) -> Result, Error> { - let leaves = backend.load_leaves()?.into_hashes_descending(); + let leaves: Vec<_> = backend.load_leaves()? 
+ .into_hashes_descending() + .into_iter() + .collect(); + if leaves.is_empty() { let finalized_hash = fetch_finalized(ctx).await?.0; Ok(vec![finalized_hash]) From 5407dae8bbbdadcbb68702584f22171c57923f91 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Jun 2021 23:16:45 +0100 Subject: [PATCH 39/79] write down a few more tests to do --- node/core/chain-selection/src/backend.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index 44bc5858df05..bceb8d20d419 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -226,3 +226,13 @@ pub(super) fn find_best_leaf_containing( // If there are no viable leaves containing the ancestor Ok(None) } + +#[cfg(test)] +mod tests { + use super::*; + + // TODO [now]; test find best leaf containing with no leaves. + // TODO [now]: find best leaf containing when required is finalized + // TODO [now]: find best leaf containing when required is unfinalized. + // TODO [now]: find best leaf containing when required is ancestor of many leaves. +} From f975bf2ed2b1fdfb3fc03090e5ed6d6c70f2f12f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 00:33:23 +0100 Subject: [PATCH 40/79] remove dependence of tree on header --- node/core/chain-selection/src/backend.rs | 2 +- node/core/chain-selection/src/lib.rs | 53 +++++++++++++++++++- node/core/chain-selection/src/tree.rs | 64 ++++-------------------- 3 files changed, 63 insertions(+), 56 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index bceb8d20d419..91c4df1a003e 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -21,7 +21,7 @@ //! [`Backend`], maintaining consistency between queries and temporary writes, //! before any commit to the underlying storage is made. -use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; +use polkadot_primitives::v1::{BlockNumber, Hash, ConsensusLog}; use std::collections::HashMap; diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 517607d26bc5..cb3fc4ad3d5a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -16,7 +16,7 @@ //! Implements the Chain Selection Subsystem. -use polkadot_primitives::v1::{BlockNumber, Hash, Header}; +use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, @@ -342,12 +342,61 @@ async fn handle_active_leaf( // for this, we want ascending order. for (hash, header) in new_blocks.into_iter().rev() { let weight = unimplemented!(); - crate::tree::import_block(&mut overlay, hash, header, weight)?; + let reversion_logs = extract_reversion_logs(&header); + crate::tree::import_block( + &mut overlay, + hash, + header.number, + header.parent_hash, + reversion_logs, + weight, + )?; } Ok(overlay.into_write_ops().collect()) } +// Extract all reversion logs from a header in ascending order. +// +// Ignores logs with number >= the block header number. 
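+// Such logs would target the block itself or a future block and are treated as invalid.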
+fn extract_reversion_logs(header: &Header) -> Vec { + let number = header.number; + let mut logs = header.digest.logs() + .iter() + .enumerate() + .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + index = i, + block_hash = ?header.hash(), + "Digest item failed to encode" + ); + + None + } + Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) => { + tracing::warn!( + target: LOG_TARGET, + revert_target = b, + block_number = number, + block_hash = ?header.hash(), + "Block issued invalid revert digest targeting itself or future" + ); + + None + } + Ok(_) => None, + }) + .collect::>(); + + logs.sort(); + + logs +} + // Handle a finalized block event. fn handle_finalized_block( backend: &mut impl Backend, diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 582233d9f974..46b282c0cdbc 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -23,7 +23,7 @@ //! Each direct descendant of the finalized block acts as its own sub-tree, //! and as the finalized block advances, orphaned sub-trees are entirely pruned. -use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; +use polkadot_primitives::v1::{BlockNumber, Hash, Header}; use std::collections::HashMap; @@ -245,63 +245,22 @@ fn propagate_viability_update( pub(crate) fn import_block( backend: &mut OverlayedBackend, block_hash: Hash, - block_header: Header, + block_number: BlockNumber, + parent_hash: Hash, + reversion_logs: Vec, weight: Weight, ) -> Result<(), Error> { - let logs = extract_reversion_logs(&block_header); - - add_block(backend, block_hash, &block_header, weight)?; + add_block(backend, block_hash, block_number, parent_hash, weight)?; apply_reversions( backend, block_hash, - block_header.number, - logs, + block_number, + reversion_logs, )?; Ok(()) } -// Extract all reversion logs from a header in ascending order. -// -// Ignores logs with number >= the block header number. -fn extract_reversion_logs(header: &Header) -> Vec { - let number = header.number; - let mut logs = header.digest.logs() - .iter() - .enumerate() - .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { - Err(e) => { - tracing::warn!( - target: LOG_TARGET, - err = ?e, - index = i, - block_hash = ?header.hash(), - "Digest item failed to encode" - ); - - None - } - Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), - Ok(Some(ConsensusLog::Revert(b))) => { - tracing::warn!( - target: LOG_TARGET, - revert_target = b, - block_number = number, - block_hash = ?header.hash(), - "Block issued invalid revert digest targeting itself or future" - ); - - None - } - Ok(_) => None, - }) - .collect::>(); - - logs.sort(); - - logs -} - // Load the given ancestor's block entry, in descending order from the `block_hash`. // The ancestor_number must be at least one block less than the `block_number`. // @@ -344,11 +303,10 @@ fn load_ancestor( fn add_block( backend: &mut OverlayedBackend, block_hash: Hash, - block_header: &Header, + block_number: BlockNumber, + parent_hash: Hash, weight: Weight, ) -> Result<(), Error> { - let parent_hash = block_header.parent_hash; - let mut leaves = backend.load_leaves()?; let parent_entry = backend.load_block_entry(&parent_hash)?; @@ -383,9 +341,9 @@ fn add_block( } // 4. Add to blocks-by-number. 
- let mut blocks_by_number = backend.load_blocks_by_number(block_header.number)?; + let mut blocks_by_number = backend.load_blocks_by_number(block_number)?; blocks_by_number.push(block_hash); - backend.write_blocks_by_number(block_header.number, blocks_by_number); + backend.write_blocks_by_number(block_number, blocks_by_number); // 5. Add stagnation timeout. let stagnant_at = crate::stagnant_timeout_from_now(); From 2518a17606eb85f06ae5829789f0ce4b6b3159bf Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 06:55:56 +0100 Subject: [PATCH 41/79] guide: ChainApiMessage::BlockWeight --- roadmap/implementers-guide/src/node/utility/chain-api.md | 1 + .../implementers-guide/src/node/utility/chain-selection.md | 4 ++-- roadmap/implementers-guide/src/types/overseer-protocol.md | 5 +++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/roadmap/implementers-guide/src/node/utility/chain-api.md b/roadmap/implementers-guide/src/node/utility/chain-api.md index 7695b73a05e3..e9ef9b5695bc 100644 --- a/roadmap/implementers-guide/src/node/utility/chain-api.md +++ b/roadmap/implementers-guide/src/node/utility/chain-api.md @@ -15,6 +15,7 @@ On receipt of `ChainApiMessage`, answer the request and provide the response to Currently, the following requests are supported: * Block hash to number * Block hash to header +* Block weight * Finalized block number to hash * Last finalized block number * Ancestors diff --git a/roadmap/implementers-guide/src/node/utility/chain-selection.md b/roadmap/implementers-guide/src/node/utility/chain-selection.md index c64cb14efb44..3eeaf998f4fb 100644 --- a/roadmap/implementers-guide/src/node/utility/chain-selection.md +++ b/roadmap/implementers-guide/src/node/utility/chain-selection.md @@ -2,7 +2,7 @@ This subsystem implements the necessary metadata for the implementation of the [chain selection](../../protocol-chain-selection.md) portion of the protocol. -The subsystem wraps a database component which maintains a view of the unfinalized chain and records the properties of each block: whether the block is **viable**, whether it is **stagnant**, and whether it is **reverted**. It should also maintain an updated set of active leaves in accordance with this view, which should be cheap to query. +The subsystem wraps a database component which maintains a view of the unfinalized chain and records the properties of each block: whether the block is **viable**, whether it is **stagnant**, and whether it is **reverted**. It should also maintain an updated set of active leaves in accordance with this view, which should be cheap to query. Leaves are ordered descending first by weight and then by block number. This subsystem needs to update its information on the unfinalized chain: * On every leaf-activated signal @@ -14,7 +14,7 @@ Simple implementations of these updates do O(n_unfinalized_blocks) disk operatio ### `OverseerSignal::ActiveLeavesUpdate` -Determine all new blocks implicitly referenced by any new active leaves and add them to the view. Update the set of viable leaves accordingly +Determine all new blocks implicitly referenced by any new active leaves and add them to the view. Update the set of viable leaves accordingly. The weights of imported blocks can be determined by the [`ChainApiMessage::BlockWeight`](../../types/overseer-protocol.md#chain-api-message). 
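
As a rough sketch of how that request could look (the helper name, the `ctx` handle, and the exact channel plumbing here are illustrative assumptions, not part of the subsystem yet):

```rust
// Illustrative only: based on the `ChainApiMessage::BlockWeight` variant described
// in the overseer protocol; error conversions are assumed to exist.
async fn fetch_block_weight(
    ctx: &mut impl SubsystemContext,
    hash: Hash,
) -> Result<Option<Weight>, Error> {
    let (tx, rx) = oneshot::channel();
    ctx.send_message(ChainApiMessage::BlockWeight(hash, tx).into()).await;

    // The outer `?` handles a dropped response channel, the inner one a Chain API error.
    Ok(rx.await??)
}
```

A `None` response means the Chain API does not know the block's weight, in which case the block cannot yet be added to the view.
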
### `OverseerSignal::BlockFinalized` diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 9376ce42eaf8..509c5b0ed765 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -299,6 +299,11 @@ enum ChainApiMessage { /// Request the block header by hash. /// Returns `None` if a block with the given hash is not present in the db. BlockHeader(Hash, ResponseChannel, Error>>), + /// Get the cumulative weight of the given block, by hash. + /// If the block or weight is unknown, this returns `None`. + /// + /// Weight is used for comparing blocks in a fork-choice rule. + BlockWeight(Hash, ResponseChannel, Error>>), /// Get the finalized block hash by number. /// Returns `None` if a block with the given number is not present in the db. /// Note: the caller must ensure the block is finalized. From 32d73a133c51f8e64532c13ed0e71debce8ae216 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 07:01:57 +0100 Subject: [PATCH 42/79] node: BlockWeight ChainAPI --- Cargo.lock | 320 +++++++++++++++++---------------- node/core/chain-api/src/lib.rs | 6 + node/primitives/src/lib.rs | 3 + node/subsystem/src/messages.rs | 10 +- 4 files changed, 179 insertions(+), 160 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13eeb2d77103..dad2ff5e0991 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,5 +1,7 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 3 + [[package]] name = "Inflector" version = "0.11.4" @@ -479,7 +481,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" dependencies = [ "beefy-primitives", "futures 0.3.15", @@ -507,7 +509,7 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" dependencies = [ "beefy-gadget", "beefy-primitives", @@ -528,7 +530,7 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "0.1.0" -source = "git+https://github.com/paritytech/grandpa-bridge-gadget#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" dependencies = [ "parity-scale-codec", "sp-api", @@ -1889,7 +1891,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", ] @@ -1907,7 +1909,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "3.1.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -1926,7 +1928,7 @@ 
dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "Inflector", "chrono", @@ -1949,7 +1951,7 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -1962,7 +1964,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -1977,7 +1979,7 @@ dependencies = [ [[package]] name = "frame-metadata" version = "13.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "serde", @@ -1988,7 +1990,7 @@ dependencies = [ [[package]] name = "frame-support" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "bitflags", "frame-metadata", @@ -2015,7 +2017,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "Inflector", "frame-support-procedural-tools", @@ -2027,7 +2029,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.0.0", @@ -2039,7 +2041,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro2", "quote", @@ -2049,7 +2051,7 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-metadata", "frame-support", @@ -2069,7 +2071,7 @@ dependencies = [ [[package]] name = "frame-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies 
= [ "frame-support", "impl-trait-for-tuples", @@ -2086,7 +2088,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -2100,7 +2102,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-api", @@ -2109,7 +2111,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "parity-scale-codec", @@ -4042,7 +4044,7 @@ dependencies = [ [[package]] name = "max-encoded-len" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-trait-for-tuples", "max-encoded-len-derive", @@ -4053,7 +4055,7 @@ dependencies = [ [[package]] name = "max-encoded-len-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -4556,7 +4558,7 @@ checksum = "13370dae44474229701bb69b90b4f4dca6404cb0357a2d50d635f1171dc3aa7b" [[package]] name = "pallet-authority-discovery" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4571,7 +4573,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4585,7 +4587,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4608,7 +4610,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4623,7 +4625,7 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "0.1.0" -source = 
"git+https://github.com/paritytech/grandpa-bridge-gadget#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" +source = "git+https://github.com/paritytech/grandpa-bridge-gadget?branch=master#a58179d4f59f4c31c7a09d5b0ce14df8450743a3" dependencies = [ "beefy-primitives", "frame-support", @@ -4638,7 +4640,7 @@ dependencies = [ [[package]] name = "pallet-bounties" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4674,7 +4676,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4690,7 +4692,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4705,7 +4707,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4726,7 +4728,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4743,7 +4745,7 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4757,7 +4759,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "3.1.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4779,7 +4781,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4794,7 +4796,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4813,7 
+4815,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4829,7 +4831,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4844,7 +4846,7 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", @@ -4861,7 +4863,7 @@ dependencies = [ [[package]] name = "pallet-mmr-primitives" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4877,7 +4879,7 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4895,7 +4897,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -4910,7 +4912,7 @@ dependencies = [ [[package]] name = "pallet-nicks" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4923,7 +4925,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4939,7 +4941,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4961,7 +4963,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ 
"frame-benchmarking", "frame-support", @@ -4977,7 +4979,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -4990,7 +4992,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "enumflags2", "frame-support", @@ -5004,7 +5006,7 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5019,7 +5021,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -5039,7 +5041,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5055,7 +5057,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -5068,7 +5070,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5092,7 +5094,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -5103,7 +5105,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "sp-arithmetic", @@ -5112,7 +5114,7 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = 
"git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -5125,7 +5127,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5143,7 +5145,7 @@ dependencies = [ [[package]] name = "pallet-tips" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5158,7 +5160,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-support", "frame-system", @@ -5174,7 +5176,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -5191,7 +5193,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5202,7 +5204,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5218,7 +5220,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-benchmarking", "frame-support", @@ -5233,7 +5235,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "enumflags2", "frame-benchmarking", @@ -7540,7 +7542,7 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "env_logger 0.8.2", "hex", @@ -7843,7 +7845,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "derive_more", @@ -7872,7 +7874,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "futures-timer 3.0.2", @@ -7895,7 +7897,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -7911,7 +7913,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7932,7 +7934,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -7943,7 +7945,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "chrono", "fdlimit", @@ -7981,7 +7983,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "fnv", @@ -8015,7 +8017,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "blake2-rfc", "hash-db", @@ -8045,7 +8047,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parking_lot 0.11.1", "sc-client-api", @@ -8057,7 +8059,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "derive_more", @@ -8103,7 +8105,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "futures 0.3.15", @@ -8127,7 +8129,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "fork-tree", "parity-scale-codec", @@ -8140,7 +8142,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "futures 0.3.15", @@ -8168,7 +8170,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "sc-client-api", "sp-authorship", @@ -8179,7 +8181,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "lazy_static", @@ -8208,7 +8210,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "parity-scale-codec", @@ -8225,7 +8227,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "parity-scale-codec", @@ -8240,7 +8242,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "cfg-if 1.0.0", "libc", @@ -8259,7 +8261,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "derive_more", @@ -8300,7 +8302,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "finality-grandpa", @@ -8324,7 +8326,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-warp-sync" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "futures 0.3.15", @@ -8345,7 +8347,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "ansi_term 0.12.1", "futures 0.3.15", @@ -8363,7 +8365,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "derive_more", @@ -8383,7 +8385,7 @@ dependencies = [ [[package]] name = "sc-light" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "hash-db", "lazy_static", @@ -8402,7 +8404,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-std", "async-trait", @@ -8455,7 +8457,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "futures-timer 3.0.2", @@ -8472,7 +8474,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "bytes 0.5.6", "fnv", @@ -8500,7 +8502,7 @@ dependencies = [ [[package]] name = "sc-peerset" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "libp2p", @@ -8513,7 +8515,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -8522,7 +8524,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "hash-db", @@ -8557,7 +8559,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "futures 0.3.15", @@ -8582,7 +8584,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.1.29", "jsonrpc-core", @@ -8600,7 +8602,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "directories", @@ -8665,7 +8667,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "parity-scale-codec", @@ -8680,7 +8682,7 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -8700,7 +8702,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "chrono", "futures 0.3.15", @@ -8720,7 +8722,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "ansi_term 0.12.1", "atty", @@ -8757,7 +8759,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -8768,7 +8770,7 @@ dependencies = [ [[package]] name = "sc-transaction-graph" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "futures 0.3.15", @@ -8790,7 +8792,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "intervalier", @@ -9269,7 +9271,7 @@ dependencies = [ [[package]] name = "sp-allocator" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "sp-core", @@ -9281,7 +9283,7 @@ dependencies = [ [[package]] name = "sp-api" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "hash-db", "log", @@ -9298,7 +9300,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "blake2-rfc", "proc-macro-crate 1.0.0", @@ -9310,7 +9312,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "max-encoded-len", "parity-scale-codec", @@ -9323,7 +9325,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "integer-sqrt", "num-traits", @@ -9337,7 +9339,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-api", @@ -9349,7 +9351,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "parity-scale-codec", @@ -9361,7 +9363,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-api", @@ -9373,7 +9375,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "log", @@ -9391,7 +9393,7 @@ dependencies = [ [[package]] name = "sp-chain-spec" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "serde", "serde_json", @@ -9400,7 +9402,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "futures 0.3.15", @@ -9427,7 +9429,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "merlin", @@ -9449,7 +9451,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-arithmetic", @@ -9459,7 +9461,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -9471,7 +9473,7 @@ dependencies = [ [[package]] name = "sp-core" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "base58", "blake2-rfc", @@ -9516,7 +9518,7 @@ dependencies = [ [[package]] name = "sp-database" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "kvdb", "parking_lot 0.11.1", @@ -9525,7 +9527,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro2", "quote", @@ -9535,7 +9537,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "environmental", "parity-scale-codec", @@ -9546,7 +9548,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "finality-grandpa", "log", @@ -9563,7 +9565,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -9577,7 +9579,7 @@ dependencies = [ [[package]] name = "sp-io" version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "hash-db", @@ -9602,7 +9604,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "lazy_static", "sp-core", @@ -9613,7 +9615,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "derive_more", @@ -9630,7 +9632,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "ruzstd", "zstd", @@ -9639,7 +9641,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "serde", @@ -9652,7 +9654,7 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -9663,7 +9665,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "sp-api", "sp-core", @@ -9673,7 +9675,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "backtrace", ] @@ -9681,7 +9683,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "rustc-hash", "serde", @@ -9692,7 +9694,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "either", "hash256-std-hasher", @@ -9714,7 +9716,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = 
"git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -9731,7 +9733,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "Inflector", "proc-macro-crate 1.0.0", @@ -9743,7 +9745,7 @@ dependencies = [ [[package]] name = "sp-serializer" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "serde", "serde_json", @@ -9752,7 +9754,7 @@ dependencies = [ [[package]] name = "sp-session" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-api", @@ -9765,7 +9767,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -9775,7 +9777,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "hash-db", "log", @@ -9798,12 +9800,12 @@ dependencies = [ [[package]] name = "sp-std" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" [[package]] name = "sp-storage" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9816,7 +9818,7 @@ dependencies = [ [[package]] name = "sp-tasks" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "log", "sp-core", @@ -9829,7 +9831,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "futures-timer 3.0.2", @@ -9846,7 +9848,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "erased-serde", "log", @@ -9864,7 
+9866,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "derive_more", "futures 0.3.15", @@ -9880,7 +9882,7 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "log", @@ -9895,7 +9897,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "hash-db", "memory-db", @@ -9909,7 +9911,7 @@ dependencies = [ [[package]] name = "sp-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "futures-core", @@ -9921,7 +9923,7 @@ dependencies = [ [[package]] name = "sp-version" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9934,7 +9936,7 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "parity-scale-codec", "proc-macro-crate 1.0.0", @@ -9946,7 +9948,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -10103,7 +10105,7 @@ dependencies = [ [[package]] name = "substrate-browser-utils" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "chrono", "console_error_panic_hook", @@ -10129,7 +10131,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "platforms", ] @@ -10137,7 +10139,7 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-system-rpc-runtime-api", 
"futures 0.3.15", @@ -10160,7 +10162,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-std", "derive_more", @@ -10174,7 +10176,7 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "async-trait", "futures 0.1.29", @@ -10203,7 +10205,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "futures 0.3.15", "substrate-test-utils-derive", @@ -10213,7 +10215,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "proc-macro-crate 1.0.0", "quote", @@ -10968,7 +10970,7 @@ checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "try-runtime-cli" version = "0.9.0" -source = "git+https://github.com/paritytech/substrate#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" +source = "git+https://github.com/paritytech/substrate?branch=master#34d4bb0ed47f7ec5ae26add9f9f8f244428a3e02" dependencies = [ "frame-try-runtime", "log", diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index 0e6d3623c3e1..b991acba2477 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -104,6 +104,12 @@ where subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, + ChainApiMessage::BlockWeight(hash, response_channel) => { + // TODO [now] + + // choice to fork + let _ = response_channel.send(Some(69)) + } ChainApiMessage::FinalizedBlockHash(number, response_channel) => { let _timer = subsystem.metrics.time_finalized_block_hash(); // Note: we don't verify it's finalized diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index dddec2cfbe1c..674500d43443 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -57,6 +57,9 @@ pub const MAX_POV_SIZE: u32 = 20 * 1024 * 1024; /// The bomb limit for decompressing PoV blobs. pub const POV_BOMB_LIMIT: usize = MAX_POV_SIZE as usize; +/// The cumulative weight of a block in a fork-choice rule. +pub type BlockWeight = u64; + /// A statement, where the candidate receipt is included in the `Seconded` variant. /// /// This is the committed candidate receipt instead of the bare candidate receipt. 
As such, diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index f8ae3dead6ff..3291a7d69d22 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -37,7 +37,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_primitives::{ approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, AvailableData, BabeEpoch, CandidateVotes, CollationGenerationConfig, ErasureChunk, PoV, - SignedDisputeStatement, SignedFullStatement, ValidationResult, + SignedDisputeStatement, SignedFullStatement, ValidationResult, BlockWeight, }; use polkadot_primitives::v1::{ AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateDescriptor, CandidateEvent, @@ -462,6 +462,14 @@ pub enum ChainApiMessage { /// Request the block header by hash. /// Returns `None` if a block with the given hash is not present in the db. BlockHeader(Hash, ChainApiResponseChannel>), + /// Get the cumulative weight of the given block, by hash. + /// If the block or weight is unknown, this returns `None`. + /// + /// Note: this the weight within the low-level fork-choice rule, + /// not the high-level one implemented in the chain-selection subsystem. + /// + /// Weight is used for comparing blocks in a fork-choice rule. + BlockWeight(Hash, ChainApiResponseChannel>), /// Request the finalized block hash by number. /// Returns `None` if a block with the given number is not present in the db. /// Note: the caller must ensure the block is finalized. From ca45cc3c450496df385f8b864aaef19a024861ef Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 07:03:34 +0100 Subject: [PATCH 43/79] fix compile issue --- node/core/chain-api/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index b991acba2477..ff2ba17054fd 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -104,11 +104,11 @@ where subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, - ChainApiMessage::BlockWeight(hash, response_channel) => { + ChainApiMessage::BlockWeight(_, response_channel) => { // TODO [now] // choice to fork - let _ = response_channel.send(Some(69)) + let _ = response_channel.send(Ok(Some(69))); } ChainApiMessage::FinalizedBlockHash(number, response_channel) => { let _timer = subsystem.metrics.time_finalized_block_hash(); From 8a97b710dd56ade3f11721668bc774bd0f0cd98a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 07:04:40 +0100 Subject: [PATCH 44/79] note a few TODOs for the future --- node/core/chain-selection/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index cb3fc4ad3d5a..c3c83e39fb9e 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -289,6 +289,7 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) async fn fetch_finalized( ctx: &mut impl SubsystemContext, ) -> Result<(Hash, BlockNumber), Error> { + // TODO [now] unimplemented!() } @@ -341,6 +342,7 @@ async fn handle_active_leaf( // determine_new_blocks gives blocks in descending order. // for this, we want ascending order. for (hash, header) in new_blocks.into_iter().rev() { + // TODO [now]: if none, skip and warn (grimace). 
let weight = unimplemented!(); let reversion_logs = extract_reversion_logs(&header); crate::tree::import_block( From 05dc72ac72179d1afa20976a3479596f1aef7443 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 21:45:46 +0100 Subject: [PATCH 45/79] fetch block weight using new BlockWeight ChainAPI --- Cargo.lock | 1 + node/core/chain-selection/Cargo.toml | 1 + node/core/chain-selection/src/lib.rs | 34 +++++++++++++++++++++++---- node/core/chain-selection/src/tree.rs | 8 ++++--- 4 files changed, 36 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1be3d1cc8b45..c9bf6ade5d9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6103,6 +6103,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "parity-scale-codec", + "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index 50e1799f9f2e..2bd27dc2527b 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" futures = "0.3.15" tracing = "0.1.26" polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } kvdb = "0.9.0" diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index c3c83e39fb9e..1ac97fd96f6e 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -17,6 +17,7 @@ //! Implements the Chain Selection Subsystem. use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; +use polkadot_node_primitives::BlockWeight; use polkadot_subsystem::{ Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, @@ -37,7 +38,6 @@ mod tree; const LOG_TARGET: &str = "parachain::chain-selection"; -type Weight = u64; type Timestamp = u64; #[derive(Debug, Clone)] @@ -87,7 +87,8 @@ impl ViabilityCriteria { #[derive(Debug, Clone)] struct LeafEntry { - weight: Weight, + weight: BlockWeight, + // TODO [now]: block number as well for weight tie-breaking block_hash: Hash, } @@ -131,7 +132,7 @@ struct BlockEntry { parent_hash: Hash, children: Vec, viability: ViabilityCriteria, - weight: Weight, + weight: BlockWeight, } impl BlockEntry { @@ -313,6 +314,16 @@ async fn fetch_header( } } +async fn fetch_block_weight( + ctx: &mut impl SubsystemContext, + hash: Hash, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ChainApiMessage::BlockWeight(hash, tx).into()).await; + + rx.await?.map_err(Into::into) +} + // Handle a new active leaf. async fn handle_active_leaf( ctx: &mut impl SubsystemContext, @@ -342,8 +353,21 @@ async fn handle_active_leaf( // determine_new_blocks gives blocks in descending order. // for this, we want ascending order. for (hash, header) in new_blocks.into_iter().rev() { - // TODO [now]: if none, skip and warn (grimace). - let weight = unimplemented!(); + let weight = match fetch_block_weight(ctx, hash).await? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?hash, + "Missing block weight for new head. Skipping chain.", + ); + + // If we don't know the weight, we can't import the block. + // And none of its descendents either. 
+ break; + } + Some(w) => w, + }; + let reversion_logs = extract_reversion_logs(&header); crate::tree::import_block( &mut overlay, diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 46b282c0cdbc..3ebd1a8e5f22 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -24,12 +24,14 @@ //! and as the finalized block advances, orphaned sub-trees are entirely pruned. use polkadot_primitives::v1::{BlockNumber, Hash, Header}; +use polkadot_node_primitives::BlockWeight; + use std::collections::HashMap; use super::{ LOG_TARGET, - Approval, BlockEntry, Error, LeafEntry, LeafEntrySet, ViabilityCriteria, Weight, + Approval, BlockEntry, Error, LeafEntry, LeafEntrySet, ViabilityCriteria, Timestamp, }; use crate::backend::{Backend, OverlayedBackend}; @@ -248,7 +250,7 @@ pub(crate) fn import_block( block_number: BlockNumber, parent_hash: Hash, reversion_logs: Vec, - weight: Weight, + weight: BlockWeight, ) -> Result<(), Error> { add_block(backend, block_hash, block_number, parent_hash, weight)?; apply_reversions( @@ -305,7 +307,7 @@ fn add_block( block_hash: Hash, block_number: BlockNumber, parent_hash: Hash, - weight: Weight, + weight: BlockWeight, ) -> Result<(), Error> { let mut leaves = backend.load_leaves()?; let parent_entry = backend.load_block_entry(&parent_hash)?; From f95d23e5137971b004d30b52a7d448bbec1f770e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 21:58:33 +0100 Subject: [PATCH 46/79] implement unimplemented --- node/core/chain-selection/src/lib.rs | 51 ++++++++++++++++++---------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 1ac97fd96f6e..e205d1fc8c2e 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -289,9 +289,28 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) async fn fetch_finalized( ctx: &mut impl SubsystemContext, -) -> Result<(Hash, BlockNumber), Error> { - // TODO [now] - unimplemented!() +) -> Result, Error> { + let (number_tx, number_rx) = oneshot::channel(); + let (hash_tx, hash_rx) = oneshot::channel(); + + ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx).into()).await; + + let number = number_rx.await??; + + ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx).into()).await; + + match hash_rx.await?? { + None => { + tracing::warn!( + target: LOG_TARGET, + number, + "Missing hash for finalized block number" + ); + + return Ok(None) + } + Some(h) => Ok(Some((h, number))) + } } async fn fetch_header( @@ -301,17 +320,7 @@ async fn fetch_header( let (h_tx, h_rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::BlockHeader(hash, h_tx).into()).await; - match h_rx.await?? { - None => { - tracing::warn!( - target: LOG_TARGET, - ?hash, - "Missing header for new head", - ); - Ok(None) - } - Some(h) => Ok(Some(h)), - } + h_rx.await?.map_err(Into::into) } async fn fetch_block_weight( @@ -332,11 +341,18 @@ async fn handle_active_leaf( ) -> Result, Error> { let lower_bound = match backend.load_first_block_number()? { Some(l) => l, - None => fetch_finalized(ctx).await?.1, + None => fetch_finalized(ctx).await?.map_or(1, |(_, n)| n), }; let header = match fetch_header(ctx, hash).await? 
{ - None => return Ok(Vec::new()), + None => { + tracing::warn!( + target: LOG_TARGET, + ?hash, + "Missing header for new head", + ); + return Ok(Vec::new()) + } Some(h) => h, }; @@ -469,8 +485,7 @@ async fn load_leaves( .collect(); if leaves.is_empty() { - let finalized_hash = fetch_finalized(ctx).await?.0; - Ok(vec![finalized_hash]) + Ok(fetch_finalized(ctx).await?.map_or(Vec::new(), |(h, _)| vec![h])) } else { Ok(leaves) } From 5c2d2d95701192fed198b86d38fb18bd3fb98c9e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 22:05:25 +0100 Subject: [PATCH 47/79] sort leaves by block number after weight --- node/core/chain-selection/src/lib.rs | 21 ++++++++++++++++++--- node/core/chain-selection/src/tree.rs | 3 ++- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index e205d1fc8c2e..73b9fdb8ca5a 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -85,13 +85,26 @@ impl ViabilityCriteria { } } -#[derive(Debug, Clone)] +// Light entries describing leaves of the chain. +// +// These are ordered first by weight and then by block number. +#[derive(Debug, Clone, PartialEq)] struct LeafEntry { weight: BlockWeight, - // TODO [now]: block number as well for weight tie-breaking + block_number: BlockNumber, block_hash: Hash, } +impl PartialOrd for LeafEntry { + fn partial_cmp(&self, other: &Self) -> Option { + if self.weight == other.weight { + self.block_number.partial_cmp(&other.block_number) + } else { + self.weight.partial_cmp(&other.weight) + } + } +} + #[derive(Debug, Clone)] struct LeafEntrySet { inner: Vec @@ -113,7 +126,7 @@ impl LeafEntrySet { } fn insert(&mut self, new: LeafEntry) { - match self.inner.iter().position(|e| e.weight < new.weight) { + match self.inner.iter().position(|e| e < &new) { None => self.inner.push(new), Some(i) => if self.inner[i].block_hash != new.block_hash { self.inner.insert(i, new); @@ -129,6 +142,7 @@ impl LeafEntrySet { #[derive(Debug, Clone)] struct BlockEntry { block_hash: Hash, + block_number: BlockNumber, parent_hash: Hash, children: Vec, viability: ViabilityCriteria, @@ -139,6 +153,7 @@ impl BlockEntry { fn leaf_entry(&self) -> LeafEntry { LeafEntry { block_hash: self.block_hash, + block_number: self.block_number, weight: self.weight, } } diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 3ebd1a8e5f22..5e308eef6205 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -319,6 +319,7 @@ fn add_block( backend.write_block_entry( BlockEntry { block_hash, + block_number, parent_hash, children: Vec::new(), viability: ViabilityCriteria { @@ -332,7 +333,7 @@ fn add_block( // 2. Update leaves if parent was a viable leaf or the parent is unknown. 
if leaves.remove(&parent_hash) || parent_entry.is_none() { - leaves.insert(LeafEntry { block_hash, weight }); + leaves.insert(LeafEntry { block_hash, block_number, weight }); backend.write_leaves(leaves); } From fc13846294d8ee7bed04298b76637652165c20d3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 22:18:23 +0100 Subject: [PATCH 48/79] remove warnings and add more TODOs --- node/core/chain-selection/src/backend.rs | 2 +- node/core/chain-selection/src/lib.rs | 63 +++++++++++++++++++++--- node/core/chain-selection/src/tree.rs | 6 ++- 3 files changed, 61 insertions(+), 10 deletions(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index 91c4df1a003e..b97340f442f5 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -21,7 +21,7 @@ //! [`Backend`], maintaining consistency between queries and temporary writes, //! before any commit to the underlying storage is made. -use polkadot_primitives::v1::{BlockNumber, Hash, ConsensusLog}; +use polkadot_primitives::v1::{BlockNumber, Hash}; use std::collections::HashMap; diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 73b9fdb8ca5a..35b1d5f07ad0 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -19,7 +19,7 @@ use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; use polkadot_node_primitives::BlockWeight; use polkadot_subsystem::{ - Subsystem, SubsystemContext, SubsystemResult, SubsystemError, SpawnedSubsystem, + Subsystem, SubsystemContext, SubsystemError, SpawnedSubsystem, OverseerSignal, FromOverseer, messages::{ChainSelectionMessage, ChainApiMessage}, errors::ChainApiError, @@ -27,8 +27,8 @@ use polkadot_subsystem::{ use parity_scale_codec::Error as CodecError; use futures::channel::oneshot; +use futures::prelude::*; -use std::collections::HashMap; use std::time::{UNIX_EPOCH, SystemTime}; use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; @@ -105,16 +105,12 @@ impl PartialOrd for LeafEntry { } } -#[derive(Debug, Clone)] +#[derive(Debug, Default, Clone)] struct LeafEntrySet { inner: Vec } impl LeafEntrySet { - fn contains(&self, hash: &Hash) -> bool { - self.inner.iter().position(|e| &e.block_hash == hash).is_some() - } - fn remove(&mut self, hash: &Hash) -> bool { match self.inner.iter().position(|e| &e.block_hash == hash) { None => false, @@ -228,6 +224,58 @@ fn stagnant_timeout_from_now() -> Timestamp { timestamp_now() + STAGNANT_TIMEOUT } +// TODO https://github.com/paritytech/polkadot/issues/3293: +// +// This is used just so we can have a public function that calls +// `run` and eliminates all the unused errors. +// +// Should be removed when the real implementation is done. +struct VoidBackend; + +impl Backend for VoidBackend { + fn load_block_entry(&self, _: &Hash) -> Result, Error> { + Ok(None) + } + fn load_leaves(&self) -> Result { + Ok(LeafEntrySet::default()) + } + fn load_stagnant_at(&self, _: Timestamp) -> Result, Error> { + Ok(Vec::new()) + } + fn load_stagnant_at_up_to(&self, _: Timestamp) + -> Result)>, Error> + { + Ok(Vec::new()) + } + fn load_first_block_number(&self) -> Result, Error> { + Ok(None) + } + fn load_blocks_by_number(&self, _: BlockNumber) -> Result, Error> { + Ok(Vec::new()) + } + + fn write(&mut self, _: I) -> Result<(), Error> + where I: IntoIterator + { + Ok(()) + } +} + +/// The chain selection subsystem. 
+pub struct ChainSelectionSubsystem; + +impl Subsystem for ChainSelectionSubsystem + where Context: SubsystemContext +{ + fn start(self, ctx: Context) -> SpawnedSubsystem { + let backend = VoidBackend; + SpawnedSubsystem { + future: run(ctx, backend).map(|()| Ok(())).boxed(), + name: "chain-selection-subsystem", + } + } +} + async fn run(mut ctx: Context, mut backend: B) where Context: SubsystemContext, @@ -262,6 +310,7 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) Context: SubsystemContext, B: Backend, { + // TODO https://github.com/paritytech/polkadot/issues/3293: Add stagnant checking timer loop. loop { match ctx.recv().await? { FromOverseer::Signal(OverseerSignal::Conclude) => { diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 5e308eef6205..a11a8197aef0 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -23,7 +23,7 @@ //! Each direct descendant of the finalized block acts as its own sub-tree, //! and as the finalized block advances, orphaned sub-trees are entirely pruned. -use polkadot_primitives::v1::{BlockNumber, Hash, Header}; +use polkadot_primitives::v1::{BlockNumber, Hash}; use polkadot_node_primitives::BlockWeight; @@ -31,7 +31,7 @@ use std::collections::HashMap; use super::{ LOG_TARGET, - Approval, BlockEntry, Error, LeafEntry, LeafEntrySet, ViabilityCriteria, + Approval, BlockEntry, Error, LeafEntry, ViabilityCriteria, Timestamp, }; use crate::backend::{Backend, OverlayedBackend}; @@ -548,6 +548,8 @@ pub(super) fn approve_block( /// /// This accepts a fresh backend and returns an overlay on top of it representing /// all changes made. +// TODO https://github.com/paritytech/polkadot/issues/3293:: remove allow +#[allow(unused)] pub(super) fn detect_stagnant<'a, B: 'a + Backend>( backend: &'a B, up_to: Timestamp, From fabc28de8d42a2c32adc04331e0e7efdbbb2ca98 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 22:24:13 +0100 Subject: [PATCH 49/79] create test module --- node/core/chain-selection/src/backend.rs | 10 ------- node/core/chain-selection/src/lib.rs | 3 ++ node/core/chain-selection/src/tests.rs | 36 ++++++++++++++++++++++++ node/core/chain-selection/src/tree.rs | 15 ---------- 4 files changed, 39 insertions(+), 25 deletions(-) create mode 100644 node/core/chain-selection/src/tests.rs diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index b97340f442f5..bc0a3c6ca563 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -226,13 +226,3 @@ pub(super) fn find_best_leaf_containing( // If there are no viable leaves containing the ancestor Ok(None) } - -#[cfg(test)] -mod tests { - use super::*; - - // TODO [now]; test find best leaf containing with no leaves. - // TODO [now]: find best leaf containing when required is finalized - // TODO [now]: find best leaf containing when required is unfinalized. - // TODO [now]: find best leaf containing when required is ancestor of many leaves. 
-} diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 35b1d5f07ad0..1df877659938 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -36,6 +36,9 @@ use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; mod backend; mod tree; +#[cfg(test)] +mod tests; + const LOG_TARGET: &str = "parachain::chain-selection"; type Timestamp = u64; diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs new file mode 100644 index 000000000000..c36211118a16 --- /dev/null +++ b/node/core/chain-selection/src/tests.rs @@ -0,0 +1,36 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the subsystem. +//! +//! These primarily revolve around having a backend which is shared between +//! both the test code and the tested subsystem, and which also gives the +//! test code the ability to wait for write operations to occur. + +// TODO [now]: importing a block without reversion +// TODO [now]: importing a block with reversion + +// TODO [now]: finalize a viable block +// TODO [now]: finalize an unviable block with viable descendants +// TODO [now]: finalize an unviable block with unviable descendants down the line + +// TODO [now]: mark blocks as stagnant. +// TODO [now]: approve stagnant block with unviable descendant. + +// TODO [now]; test find best leaf containing with no leaves. +// TODO [now]: find best leaf containing when required is finalized +// TODO [now]: find best leaf containing when required is unfinalized. +// TODO [now]: find best leaf containing when required is ancestor of many leaves. diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index a11a8197aef0..86c67d6b48eb 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -581,18 +581,3 @@ pub(super) fn detect_stagnant<'a, B: 'a + Backend>( Ok(backend) } - -#[cfg(test)] -mod tests { - use super::*; - - // TODO [now]: importing a block without reversion - // TODO [now]: importing a block with reversion - - // TODO [now]: finalize a viable block - // TODO [now]: finalize an unviable block with viable descendants - // TODO [now]: finalize an unviable block with unviable descendants down the line - - // TODO [now]: mark blocks as stagnant. - // TODO [now]: approve stagnant block with unviable descendant. 
-} From 46d2416a0e948d726c418571d69b9b383a385970 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 23:14:47 +0100 Subject: [PATCH 50/79] storage for test backend --- node/core/chain-selection/src/tests.rs | 75 ++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index c36211118a16..3129aa25aab6 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -20,6 +20,77 @@ //! both the test code and the tested subsystem, and which also gives the //! test code the ability to wait for write operations to occur. +use super::*; +use std::collections::{HashMap, BTreeMap}; + +#[derive(Default)] +struct TestBackendInner { + leaves: LeafEntrySet, + block_entries: HashMap, + blocks_by_number: BTreeMap>, + stagnant_at: BTreeMap>, +} + +struct TestBackend { + inner: TestBackendInner, +} + +impl Backend for TestBackend { + fn load_block_entry(&self, hash: &Hash) -> Result, Error> { + Ok(self.inner.block_entries.get(hash).map(|e| e.clone())) + } + fn load_leaves(&self) -> Result { + Ok(self.inner.leaves.clone()) + } + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { + Ok(self.inner.stagnant_at.get(×tamp).map_or(Vec::new(), |s| s.clone())) + } + fn load_stagnant_at_up_to(&self, up_to: Timestamp) + -> Result)>, Error> + { + Ok(self.inner.stagnant_at.range(..=up_to).map(|(t, v)| (*t, v.clone())).collect()) + } + fn load_first_block_number(&self) -> Result, Error> { + Ok(self.inner.blocks_by_number.range(..).map(|(k, _)| *k).next()) + } + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { + Ok(self.inner.blocks_by_number.get(&number).map_or(Vec::new(), |v| v.clone())) + } + + fn write(&mut self, ops: I) -> Result<(), Error> + where I: IntoIterator + { + let inner = &mut self.inner; + for op in ops { + match op { + BackendWriteOp::WriteBlockEntry(entry) => { + inner.block_entries.insert(entry.block_hash, entry); + } + BackendWriteOp::WriteBlocksByNumber(number, hashes) => { + inner.blocks_by_number.insert(number, hashes); + } + BackendWriteOp::WriteViableLeaves(leaves) => { + inner.leaves = leaves; + } + BackendWriteOp::WriteStagnantAt(time, hashes) => { + inner.stagnant_at.insert(time, hashes); + } + BackendWriteOp::DeleteBlocksByNumber(number) => { + inner.blocks_by_number.remove(&number); + } + BackendWriteOp::DeleteBlockEntry(hash) => { + inner.block_entries.remove(&hash); + } + BackendWriteOp::DeleteStagnantAt(time) => { + inner.stagnant_at.remove(&time); + } + } + } + + Ok(()) + } +} + // TODO [now]: importing a block without reversion // TODO [now]: importing a block with reversion @@ -34,3 +105,7 @@ // TODO [now]: find best leaf containing when required is finalized // TODO [now]: find best leaf containing when required is unfinalized. // TODO [now]: find best leaf containing when required is ancestor of many leaves. + +// TODO [now]: test assumption that each active leaf update gives 1 DB write. +// TODO [now]: test assumption that each approved block gives 1 DB write. +// TODO [now]: test assumption that each finalized block gives 1 DB write. 
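A minimal usage sketch of the in-memory test backend introduced above (hypothetical code, not part of any patch in this series): assuming the `Backend` trait and the `BackendWriteOp` variants defined earlier in the crate, a batch of writes can be round-tripped through the load methods roughly like so.

#[test]
fn test_backend_round_trips_writes() {
    // Construct the backend directly from its (defaulted) inner storage.
    let mut backend = TestBackend { inner: Default::default() };
    let hash = Hash::repeat_byte(1);

    // Writing blocks-by-number and stagnant-at entries in one batch...
    backend.write(vec![
        BackendWriteOp::WriteBlocksByNumber(1, vec![hash]),
        BackendWriteOp::WriteStagnantAt(100, vec![hash]),
    ]).unwrap();

    // ...makes them visible through the corresponding load methods.
    assert_eq!(backend.load_first_block_number().unwrap(), Some(1));
    assert_eq!(backend.load_blocks_by_number(1).unwrap(), vec![hash]);
    assert_eq!(backend.load_stagnant_at(100).unwrap(), vec![hash]);

    // Deletions remove the entries again.
    backend.write(vec![BackendWriteOp::DeleteBlocksByNumber(1)]).unwrap();
    assert!(backend.load_blocks_by_number(1).unwrap().is_empty());
}

This is the same pattern the later test patches rely on: the subsystem under test performs writes through the shared backend, and the test code inspects the stored state afterwards.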
From 14e851825ce096d6b857d2fe14be37b8beabaea9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 23:17:03 +0100 Subject: [PATCH 51/79] wrap inner in mutex --- Cargo.lock | 2 +- node/core/chain-selection/Cargo.toml | 2 +- node/core/chain-selection/src/tests.rs | 27 ++++++++++++++++++-------- 3 files changed, 21 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9bf6ade5d9a..44b2665e6a03 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6101,8 +6101,8 @@ version = "0.1.0" dependencies = [ "futures 0.3.15", "kvdb", - "kvdb-memorydb", "parity-scale-codec", + "parking_lot 0.11.1", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index 2bd27dc2527b..5f9fdcda7fd6 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -19,4 +19,4 @@ parity-scale-codec = "2" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -kvdb-memorydb = "0.9.0" +parking_lot = "0.11" diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 3129aa25aab6..716ea7e1b2ff 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -22,6 +22,9 @@ use super::*; use std::collections::{HashMap, BTreeMap}; +use std::sync::Arc; + +use parking_lot::Mutex; #[derive(Default)] struct TestBackendInner { @@ -32,35 +35,43 @@ struct TestBackendInner { } struct TestBackend { - inner: TestBackendInner, + inner: Arc>, +} + +impl Default for TestBackend { + fn default() -> Self { + TestBackend { + inner: Default::default(), + } + } } impl Backend for TestBackend { fn load_block_entry(&self, hash: &Hash) -> Result, Error> { - Ok(self.inner.block_entries.get(hash).map(|e| e.clone())) + Ok(self.inner.lock().block_entries.get(hash).map(|e| e.clone())) } fn load_leaves(&self) -> Result { - Ok(self.inner.leaves.clone()) + Ok(self.inner.lock().leaves.clone()) } fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { - Ok(self.inner.stagnant_at.get(×tamp).map_or(Vec::new(), |s| s.clone())) + Ok(self.inner.lock().stagnant_at.get(×tamp).map_or(Vec::new(), |s| s.clone())) } fn load_stagnant_at_up_to(&self, up_to: Timestamp) -> Result)>, Error> { - Ok(self.inner.stagnant_at.range(..=up_to).map(|(t, v)| (*t, v.clone())).collect()) + Ok(self.inner.lock().stagnant_at.range(..=up_to).map(|(t, v)| (*t, v.clone())).collect()) } fn load_first_block_number(&self) -> Result, Error> { - Ok(self.inner.blocks_by_number.range(..).map(|(k, _)| *k).next()) + Ok(self.inner.lock().blocks_by_number.range(..).map(|(k, _)| *k).next()) } fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { - Ok(self.inner.blocks_by_number.get(&number).map_or(Vec::new(), |v| v.clone())) + Ok(self.inner.lock().blocks_by_number.get(&number).map_or(Vec::new(), |v| v.clone())) } fn write(&mut self, ops: I) -> Result<(), Error> where I: IntoIterator { - let inner = &mut self.inner; + let mut inner = self.inner.lock(); for op in ops { match op { BackendWriteOp::WriteBlockEntry(entry) => { From c4457ae054f986c485bfad75ee6056bcf4033b24 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 18 Jun 2021 23:25:18 +0100 Subject: [PATCH 52/79] add write waker query to test backend --- node/core/chain-selection/src/tests.rs | 31 ++++++++++++++++++++++++++ 1 
file changed, 31 insertions(+) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 716ea7e1b2ff..283b7a4316af 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -24,6 +24,7 @@ use super::*; use std::collections::{HashMap, BTreeMap}; use std::sync::Arc; +use futures::channel::oneshot; use parking_lot::Mutex; #[derive(Default)] @@ -32,12 +33,39 @@ struct TestBackendInner { block_entries: HashMap, blocks_by_number: BTreeMap>, stagnant_at: BTreeMap>, + // earlier wakers at the back. + write_wakers: Vec>, } struct TestBackend { inner: Arc>, } +impl TestBackend { + // Yields a receiver which will be woken up on some future write + // to the backend along with its position (starting at 0) in the + // queue. + // + // Our tests assume that there is only one task calling this function + // and the index is useful to get a waker that will trigger after + // some known amount of writes to the backend that happen internally + // inside the subsystem. + // + // It's important to call this function at points where no writes + // are pending to the backend. This requires knowing some details + // about the internals of the subsystem, so the abstraction leaks + // somewhat, but this is acceptable enough. + fn next_write(&self) -> (usize, oneshot::Receiver<()>) { + let (tx, rx) = oneshot::channel(); + + let mut inner = self.inner.lock(); + let pos = inner.write_wakers.len(); + inner.write_wakers.insert(0, tx); + + (pos, rx) + } +} + impl Default for TestBackend { fn default() -> Self { TestBackend { @@ -98,6 +126,9 @@ impl Backend for TestBackend { } } + if let Some(waker) = inner.write_wakers.pop() { + let _ = waker.send(()); + } Ok(()) } } From 6308490a678a57cd610e763fb12f5e42913784e7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 00:11:26 +0100 Subject: [PATCH 53/79] Add OverseerSignal -> FromOverseer conversion --- node/subsystem/src/lib.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/node/subsystem/src/lib.rs b/node/subsystem/src/lib.rs index 871b6f2c80c7..d070ff4d6cf0 100644 --- a/node/subsystem/src/lib.rs +++ b/node/subsystem/src/lib.rs @@ -175,6 +175,11 @@ pub enum FromOverseer { }, } +impl From for FromOverseer { + fn from(signal: OverseerSignal) -> Self { + FromOverseer::Signal(signal) + } +} /// An error type that describes faults that may happen /// From 45a07c82c27d6de447ec865056368ac4c8a7b71e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 00:11:34 +0100 Subject: [PATCH 54/79] add test harnes --- node/core/chain-selection/src/tests.rs | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 283b7a4316af..d24394089844 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -26,6 +26,9 @@ use std::sync::Arc; use futures::channel::oneshot; use parking_lot::Mutex; +use sp_core::testing::TaskExecutor; + +use polkadot_node_subsystem_test_helpers as test_helpers; #[derive(Default)] struct TestBackendInner { @@ -37,6 +40,7 @@ struct TestBackendInner { write_wakers: Vec>, } +#[derive(Clone)] struct TestBackend { inner: Arc>, } @@ -133,6 +137,32 @@ impl Backend for TestBackend { } } +type VirtualOverseer = test_helpers::TestSubsystemContextHandle; + +fn test_harness>( + test: impl FnOnce(TestBackend, VirtualOverseer) -> T +) { + let pool = TaskExecutor::new(); + let (context, virtual_overseer) = 
test_helpers::make_subsystem_context(pool); + + let backend = TestBackend::default(); + let subsystem = crate::run(context, backend.clone()); + + let test_fut = test(backend, virtual_overseer); + let test_and_conclude = async move { + let mut virtual_overseer = test_fut.await; + virtual_overseer.send(OverseerSignal::Conclude.into()).await; + + // Ensure no messages are pending when the subsystem shuts down. + assert!(virtual_overseer.try_recv().await.is_none()); + }; + futures::executor::block_on(futures::future::join(subsystem, test_and_conclude)); +} + +#[test] +fn import_direct_child_of_finalized_on_empty() { +} + // TODO [now]: importing a block without reversion // TODO [now]: importing a block with reversion From f439ed7086de8ec70f6f1882994ab819f212765c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 00:12:15 +0100 Subject: [PATCH 55/79] add no-op test --- node/core/chain-selection/src/tests.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index d24394089844..b8a7b441acd3 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -159,6 +159,11 @@ fn test_harness>( futures::executor::block_on(futures::future::join(subsystem, test_and_conclude)); } +#[test] +fn no_op_subsystem_run() { + test_harness(|_, virtual_overseer| async move { virtual_overseer }); +} + #[test] fn import_direct_child_of_finalized_on_empty() { } From d163da5b868784449d99b6b774e3d249cd436489 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 00:24:58 +0100 Subject: [PATCH 56/79] add some more test helpers --- Cargo.lock | 1 + node/core/chain-selection/Cargo.toml | 1 + node/core/chain-selection/src/tests.rs | 50 ++++++++++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 44b2665e6a03..e1ac98b9bcaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6099,6 +6099,7 @@ dependencies = [ name = "polkadot-node-core-chain-selection" version = "0.1.0" dependencies = [ + "assert_matches", "futures 0.3.15", "kvdb", "parity-scale-codec", diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml index 5f9fdcda7fd6..ee498427ea0d 100644 --- a/node/core/chain-selection/Cargo.toml +++ b/node/core/chain-selection/Cargo.toml @@ -20,3 +20,4 @@ parity-scale-codec = "2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } parking_lot = "0.11" +assert_matches = "1" diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index b8a7b441acd3..0606d4eca88c 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -27,7 +27,10 @@ use std::sync::Arc; use futures::channel::oneshot; use parking_lot::Mutex; use sp_core::testing::TaskExecutor; +use assert_matches::assert_matches; +use polkadot_primitives::v1::{BlakeTwo256, HashT}; +use polkadot_subsystem::messages::AllMessages; use polkadot_node_subsystem_test_helpers as test_helpers; #[derive(Default)] @@ -159,6 +162,46 @@ fn test_harness>( futures::executor::block_on(futures::future::join(subsystem, test_and_conclude)); } +// Answer requests from the subsystem about the finalized block. 
+async fn answer_finalized_block_info( + overseer: &mut VirtualOverseer, + finalized_hash: Hash, + finalized_number: BlockNumber, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(tx)) => { + let _ = tx.send(Ok(finalized_number)); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash(n, tx)) => { + assert_eq!(n, finalized_number); + let _ = tx.send(Ok(Some(finalized_hash))); + } + ); +} + +fn child_header(parent_hash: Hash, parent_number: BlockNumber) -> Header { + child_header_with_salt(parent_hash, parent_number, &[]) +} + +fn child_header_with_salt( + parent_hash: Hash, + parent_number: BlockNumber, + salt: &[u8], // so siblings can have different hashes. +) -> Header { + Header { + parent_hash, + number: parent_number + 1, + state_root: BlakeTwo256::hash(salt), + extrinsics_root: Default::default(), + digest: Default::default() + } +} + #[test] fn no_op_subsystem_run() { test_harness(|_, virtual_overseer| async move { virtual_overseer }); @@ -166,6 +209,11 @@ fn no_op_subsystem_run() { #[test] fn import_direct_child_of_finalized_on_empty() { + test_harness(|backend, mut virtual_overseer| async move { + + + virtual_overseer + }) } // TODO [now]: importing a block without reversion @@ -183,6 +231,8 @@ fn import_direct_child_of_finalized_on_empty() { // TODO [now]: find best leaf containing when required is unfinalized. // TODO [now]: find best leaf containing when required is ancestor of many leaves. +// TODO [now]: leaf tiebreakers are based on height. + // TODO [now]: test assumption that each active leaf update gives 1 DB write. // TODO [now]: test assumption that each approved block gives 1 DB write. // TODO [now]: test assumption that each finalized block gives 1 DB write. From c8383886205e0da44568dbf510e114420ccf9cdf Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 00:43:22 +0100 Subject: [PATCH 57/79] the first test --- node/core/chain-selection/src/tests.rs | 83 ++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 6 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 0606d4eca88c..86ef80ada5ff 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -30,6 +30,7 @@ use sp_core::testing::TaskExecutor; use assert_matches::assert_matches; use polkadot_primitives::v1::{BlakeTwo256, HashT}; +use polkadot_subsystem::{jaeger, ActiveLeavesUpdate, ActivatedLeaf, LeafStatus}; use polkadot_subsystem::messages::AllMessages; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -62,7 +63,7 @@ impl TestBackend { // are pending to the backend. This requires knowing some details // about the internals of the subsystem, so the abstraction leaks // somewhat, but this is acceptable enough. - fn next_write(&self) -> (usize, oneshot::Receiver<()>) { + fn await_next_write(&self) -> (usize, oneshot::Receiver<()>) { let (tx, rx) = oneshot::channel(); let mut inner = self.inner.lock(); @@ -71,6 +72,13 @@ impl TestBackend { (pos, rx) } + + fn await_next_write_expecting(&self, expected_pos: usize) -> oneshot::Receiver<()> { + let (pos, rx) = self.await_next_write(); + assert_eq!(pos, expected_pos); + + rx + } } impl Default for TestBackend { @@ -165,8 +173,8 @@ fn test_harness>( // Answer requests from the subsystem about the finalized block. 
async fn answer_finalized_block_info( overseer: &mut VirtualOverseer, - finalized_hash: Hash, finalized_number: BlockNumber, + finalized_hash: Hash, ) { assert_matches!( overseer.recv().await, @@ -184,13 +192,41 @@ async fn answer_finalized_block_info( ); } -fn child_header(parent_hash: Hash, parent_number: BlockNumber) -> Header { - child_header_with_salt(parent_hash, parent_number, &[]) +async fn answer_header_request( + overseer: &mut VirtualOverseer, + maybe_header: impl Into>, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(hash, tx)) => { + let maybe_header = maybe_header.into(); + assert!(maybe_header.as_ref().map_or(true, |h| h.hash() == hash)); + let _ = tx.send(Ok(maybe_header)); + } + ) +} + +async fn answer_weight_request( + overseer: &mut VirtualOverseer, + hash: Hash, + weight: impl Into>, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockWeight(h, tx)) => { + assert_eq!(h, hash); + let _ = tx.send(Ok(weight.into())); + } + ) +} + +fn child_header(parent_number: BlockNumber, parent_hash: Hash) -> Header { + child_header_with_salt(parent_number, parent_hash, &[]) } fn child_header_with_salt( - parent_hash: Hash, parent_number: BlockNumber, + parent_hash: Hash, salt: &[u8], // so siblings can have different hashes. ) -> Header { Header { @@ -210,7 +246,42 @@ fn no_op_subsystem_run() { #[test] fn import_direct_child_of_finalized_on_empty() { test_harness(|backend, mut virtual_overseer| async move { - + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let child = child_header(finalized_number, finalized_hash); + let child_hash = child.hash(); + let child_weight = 1; + let child_number = child.number; + + let write_rx = backend.await_next_write_expecting(0); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash: child_hash, + number: child_number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + answer_finalized_block_info( + &mut virtual_overseer, + finalized_number, + finalized_hash, + ).await; + + answer_header_request(&mut virtual_overseer, child.clone()).await; + answer_weight_request(&mut virtual_overseer, child_hash, child_weight).await; + + write_rx.await.unwrap(); + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); + assert_eq!(backend.load_blocks_by_number(child_number).unwrap(), vec![child_hash]); + assert!(backend.load_block_entry(&child_hash).unwrap().is_some()); + assert_eq!( + backend.load_leaves().unwrap().into_hashes_descending().into_iter().collect::>(), + vec![child_hash], + ); virtual_overseer }) From b8c12ee2714e1b52ec2d0cd4f8f3ba0699dc802a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 06:32:59 +0100 Subject: [PATCH 58/79] more progress on tests --- node/core/chain-selection/src/lib.rs | 3 +- node/core/chain-selection/src/tests.rs | 218 ++++++++++++++++++++++--- node/core/chain-selection/src/tree.rs | 2 + 3 files changed, 201 insertions(+), 22 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 1df877659938..c4748ea65e01 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -133,7 +133,7 @@ impl LeafEntrySet { } } - fn into_hashes_descending(self) -> impl IntoIterator { + fn into_hashes_descending(self) -> impl Iterator { self.inner.into_iter().map(|e| e.block_hash) } } @@ -548,7 
+548,6 @@ async fn load_leaves( ) -> Result, Error> { let leaves: Vec<_> = backend.load_leaves()? .into_hashes_descending() - .into_iter() .collect(); if leaves.is_empty() { diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 86ef80ada5ff..15a6f8a2e359 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -73,12 +73,30 @@ impl TestBackend { (pos, rx) } + // return a receiver, expecting its position to be the given one. fn await_next_write_expecting(&self, expected_pos: usize) -> oneshot::Receiver<()> { let (pos, rx) = self.await_next_write(); assert_eq!(pos, expected_pos); rx } + + // return a receiver that will wake up after n other receivers, + // inserting receivers as necessary. + // + // panics if there are already more than n receivers. + fn await_nth_write(&self, n: usize) -> oneshot::Receiver<()> { + assert_ne!(n, 0, "invalid parameter 0"); + let expected_pos = n - 1; + + loop { + let (pos, rx) = self.await_next_write(); + assert!(pos <= expected_pos, "pending awaits {} > {}", pos, expected_pos); + if pos == expected_pos { + break rx; + } + } + } } impl Default for TestBackend { @@ -221,23 +239,101 @@ async fn answer_weight_request( } fn child_header(parent_number: BlockNumber, parent_hash: Hash) -> Header { - child_header_with_salt(parent_number, parent_hash, &[]) -} - -fn child_header_with_salt( - parent_number: BlockNumber, - parent_hash: Hash, - salt: &[u8], // so siblings can have different hashes. -) -> Header { Header { parent_hash, number: parent_number + 1, - state_root: BlakeTwo256::hash(salt), + state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default() } } +fn salt_header(header: &mut Header, salt: &[u8]) { + header.state_root = BlakeTwo256::hash(salt) +} + +// Builds a chain on top of the given base. Returns the chain (ascending) +// along with the head hash. +fn construct_chain_on_base( + len: usize, + base_number: BlockNumber, + base_hash: Hash, + mut mutate: impl FnMut(&mut Header), +) -> (Hash, Vec
) { + let mut parent_number = base_number; + let mut parent_hash = base_hash; + + let mut chain = Vec::new(); + for _ in 0..len { + let mut header = child_header(parent_number, parent_hash); + mutate(&mut header); + + parent_number = header.number; + parent_hash = header.hash(); + chain.push(header); + } + + (parent_hash, chain) +} + +fn zip_chain_and_weights(headers: &[Header], weights: &[BlockWeight]) + -> Vec<(Header, BlockWeight)> +{ + headers.iter().cloned().zip(weights.iter().cloned()).collect() +} + +async fn answer_ancestry_requests( + virtual_overseer: &mut VirtualOverseer, + finalized_answer: Option<(BlockNumber, Hash)>, + answers: Vec<(Header, BlockWeight)>, +) { + if let Some((f_n, f_h)) = finalized_answer { + answer_finalized_block_info(virtual_overseer, f_n, f_h).await; + } + + // headers in reverse order, + // TODO [now]: answer ancestor requests. + for &(ref header, _) in answers.iter().rev() { + answer_header_request(virtual_overseer, header.clone()).await; + } + + // Then weights going up. + for &(ref header, weight) in answers.iter() { + let hash = header.hash(); + answer_weight_request(virtual_overseer, hash, weight).await; + } +} + +fn assert_backend_contains<'a>( + backend: &TestBackend, + headers: impl IntoIterator, +) { + for header in headers { + let hash = header.hash(); + assert!( + backend.load_blocks_by_number(header.number).unwrap().contains(&hash), + "blocks at {} does not contain {}", + header.number, + hash, + ); + assert!( + backend.load_block_entry(&hash).unwrap().is_some(), + "no entry found for {}", + hash, + ); + } +} + +fn assert_leaves( + backend: &TestBackend, + leaves: Vec, +) { + assert_eq!( + backend.load_leaves().unwrap().into_hashes_descending().into_iter().collect::>(), + leaves, + ) +} + #[test] fn no_op_subsystem_run() { test_harness(|_, virtual_overseer| async move { virtual_overseer }); @@ -264,30 +360,112 @@ fn import_direct_child_of_finalized_on_empty() { } )).into()).await; - answer_finalized_block_info( + answer_ancestry_requests( &mut virtual_overseer, + Some((finalized_number, finalized_hash)), + vec![(child.clone(), child_weight)], + ).await; + + write_rx.await.unwrap(); + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); + assert_backend_contains(&backend, &[child]); + assert_leaves(&backend, vec![child_hash]); + + virtual_overseer + }) +} + +#[test] +fn import_chain_on_finalized_incrementally() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let (head_hash, chain) = construct_chain_on_base( + 5, finalized_number, finalized_hash, - ).await; + |_| {} + ); - answer_header_request(&mut virtual_overseer, child.clone()).await; - answer_weight_request(&mut virtual_overseer, child_hash, child_weight).await; + let chain = zip_chain_and_weights( + &chain, + &[1, 2, 3, 4, 5], + ); + + let write_rx = backend.await_nth_write(5); + for &(ref header, weight) in &chain { + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash: header.hash(), + number: header.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + answer_ancestry_requests( + &mut virtual_overseer, + Some((finalized_number, finalized_hash)).filter(|_| header.number == 1), + vec![(header.clone(), weight)] + ).await; + } write_rx.await.unwrap(); - assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); - 
assert_eq!(backend.load_blocks_by_number(child_number).unwrap(), vec![child_hash]); - assert!(backend.load_block_entry(&child_hash).unwrap().is_some()); - assert_eq!( - backend.load_leaves().unwrap().into_hashes_descending().into_iter().collect::>(), - vec![child_hash], + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![head_hash]); + + virtual_overseer + }) +} + +#[test] +fn import_chain_on_finalized_at_once() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let (head_hash, chain) = construct_chain_on_base( + 5, + finalized_number, + finalized_hash, + |_| {} ); + let chain = zip_chain_and_weights( + &chain, + &[1, 2, 3, 4, 5], + ); + + let write_rx = backend.await_next_write_expecting(0); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash: head_hash, + number: 5, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + answer_ancestry_requests( + &mut virtual_overseer, + Some((finalized_number, finalized_hash)), + chain.clone(), + ).await; + + write_rx.await.unwrap(); + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![head_hash]); + virtual_overseer }) } -// TODO [now]: importing a block without reversion // TODO [now]: importing a block with reversion // TODO [now]: finalize a viable block diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 86c67d6b48eb..886f01a2e53e 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -315,6 +315,8 @@ fn add_block( let inherited_viability = parent_entry.as_ref() .and_then(|parent| parent.non_viable_ancestor_for_child()); + println!("writing block #{},{} parent={}", block_number, block_hash, parent_hash); + // 1. Add the block to the DB assuming it's not reverted. backend.write_block_entry( BlockEntry { From a05cb2b06e4d52bae44b5877ae8c0c2348d51091 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 18:40:06 +0100 Subject: [PATCH 59/79] test two subtrees --- node/core/chain-selection/src/lib.rs | 22 +++- node/core/chain-selection/src/tests.rs | 149 ++++++++++--------------- node/core/chain-selection/src/tree.rs | 2 - 3 files changed, 79 insertions(+), 94 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index c4748ea65e01..3ba04a94dcb2 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -125,12 +125,19 @@ impl LeafEntrySet { } fn insert(&mut self, new: LeafEntry) { - match self.inner.iter().position(|e| e < &new) { - None => self.inner.push(new), - Some(i) => if self.inner[i].block_hash != new.block_hash { - self.inner.insert(i, new); + let mut pos = None; + for (i, e) in self.inner.iter().enumerate() { + if e == &new { return } + if e < &new { + pos = Some(i); + break } } + + match pos { + None => self.inner.push(new), + Some(i) => self.inner.insert(i, new), + } } fn into_hashes_descending(self) -> impl Iterator { @@ -407,7 +414,12 @@ async fn handle_active_leaf( hash: Hash, ) -> Result, Error> { let lower_bound = match backend.load_first_block_number()? 
{ - Some(l) => l, + Some(l) => { + // We want to iterate back to finalized, and first block number + // is assumed to be 1 above finalized - the implicit root of the + // tree. + l.saturating_sub(1) + }, None => fetch_finalized(ctx).await?.map_or(1, |(_, n)| n), }; diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 15a6f8a2e359..d880914b4391 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -25,6 +25,7 @@ use std::collections::{HashMap, BTreeMap}; use std::sync::Arc; use futures::channel::oneshot; +use parity_scale_codec::Encode; use parking_lot::Mutex; use sp_core::testing::TaskExecutor; use assert_matches::assert_matches; @@ -248,59 +249,63 @@ fn child_header(parent_number: BlockNumber, parent_hash: Hash) -> Header { } } -fn salt_header(header: &mut Header, salt: &[u8]) { - header.state_root = BlakeTwo256::hash(salt) +fn salt_header(header: &mut Header, salt: impl Encode) { + header.state_root = BlakeTwo256::hash_of(&salt) } -// Builds a chain on top of the given base. Returns the chain (ascending) -// along with the head hash. +// Builds a chain on top of the given base, with one block for each +// provided weight. fn construct_chain_on_base( - len: usize, + weights: impl IntoIterator, base_number: BlockNumber, base_hash: Hash, mut mutate: impl FnMut(&mut Header), -) -> (Hash, Vec
) { +) -> (Hash, Vec<(Header, BlockWeight)>) { let mut parent_number = base_number; let mut parent_hash = base_hash; let mut chain = Vec::new(); - for _ in 0..len { + for weight in weights { let mut header = child_header(parent_number, parent_hash); mutate(&mut header); parent_number = header.number; parent_hash = header.hash(); - chain.push(header); + chain.push((header, weight)); } (parent_hash, chain) } -fn zip_chain_and_weights(headers: &[Header], weights: &[BlockWeight]) - -> Vec<(Header, BlockWeight)> -{ - headers.iter().cloned().zip(weights.iter().cloned()).collect() -} - -async fn answer_ancestry_requests( +// import blocks 1-by-1. If `finalized_base` is supplied, +// it will be answered before the first block in `answers. +async fn import_blocks_into( virtual_overseer: &mut VirtualOverseer, - finalized_answer: Option<(BlockNumber, Hash)>, - answers: Vec<(Header, BlockWeight)>, + backend: &TestBackend, + mut finalized_base: Option<(BlockNumber, Hash)>, + blocks: Vec<(Header, BlockWeight)>, ) { - if let Some((f_n, f_h)) = finalized_answer { - answer_finalized_block_info(virtual_overseer, f_n, f_h).await; - } - - // headers in reverse order, - // TODO [now]: answer ancestor requests. - for &(ref header, _) in answers.iter().rev() { - answer_header_request(virtual_overseer, header.clone()).await; - } + for (header, weight) in blocks { + let (_, write_rx) = backend.await_next_write(); - // Then weights going up. - for &(ref header, weight) in answers.iter() { let hash = header.hash(); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash, + number: header.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + if let Some((f_n, f_h)) = finalized_base.take() { + answer_finalized_block_info(virtual_overseer, f_n, f_h).await; + } + + answer_header_request(virtual_overseer, header.clone()).await; answer_weight_request(virtual_overseer, hash, weight).await; + + write_rx.await.unwrap(); } } @@ -350,24 +355,13 @@ fn import_direct_child_of_finalized_on_empty() { let child_weight = 1; let child_number = child.number; - let write_rx = backend.await_next_write_expecting(0); - virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - ActivatedLeaf { - hash: child_hash, - number: child_number, - status: LeafStatus::Fresh, - span: Arc::new(jaeger::Span::Disabled), - } - )).into()).await; - - answer_ancestry_requests( + import_blocks_into( &mut virtual_overseer, + &backend, Some((finalized_number, finalized_hash)), vec![(child.clone(), child_weight)], ).await; - write_rx.await.unwrap(); - assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); assert_backend_contains(&backend, &[child]); assert_leaves(&backend, vec![child_hash]); @@ -383,36 +377,18 @@ fn import_chain_on_finalized_incrementally() { let finalized_hash = Hash::repeat_byte(0); let (head_hash, chain) = construct_chain_on_base( - 5, + vec![1, 2, 3, 4, 5], finalized_number, finalized_hash, |_| {} ); - let chain = zip_chain_and_weights( - &chain, - &[1, 2, 3, 4, 5], - ); - - let write_rx = backend.await_nth_write(5); - for &(ref header, weight) in &chain { - virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - ActivatedLeaf { - hash: header.hash(), - number: header.number, - status: LeafStatus::Fresh, - span: Arc::new(jaeger::Span::Disabled), - } - )).into()).await; - - answer_ancestry_requests( - &mut virtual_overseer, - Some((finalized_number, 
finalized_hash)).filter(|_| header.number == 1), - vec![(header.clone(), weight)] - ).await; - } - - write_rx.await.unwrap(); + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain.clone(), + ).await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); @@ -423,44 +399,43 @@ fn import_chain_on_finalized_incrementally() { } #[test] -fn import_chain_on_finalized_at_once() { +fn import_two_subtrees_on_finalized() { test_harness(|backend, mut virtual_overseer| async move { let finalized_number = 0; let finalized_hash = Hash::repeat_byte(0); - let (head_hash, chain) = construct_chain_on_base( - 5, + let (a_hash, chain_a) = construct_chain_on_base( + vec![1], finalized_number, finalized_hash, |_| {} ); - let chain = zip_chain_and_weights( - &chain, - &[1, 2, 3, 4, 5], + let (b_hash, chain_b) = construct_chain_on_base( + vec![2], + finalized_number, + finalized_hash, + |h| salt_header(h, b"a"), ); - let write_rx = backend.await_next_write_expecting(0); - virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( - ActivatedLeaf { - hash: head_hash, - number: 5, - status: LeafStatus::Fresh, - span: Arc::new(jaeger::Span::Disabled), - } - )).into()).await; - - answer_ancestry_requests( + import_blocks_into( &mut virtual_overseer, + &backend, Some((finalized_number, finalized_hash)), - chain.clone(), + chain_a.clone(), ).await; - write_rx.await.unwrap(); + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); - assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); - assert_leaves(&backend, vec![head_hash]); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b_hash, a_hash]); virtual_overseer }) diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 886f01a2e53e..86c67d6b48eb 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -315,8 +315,6 @@ fn add_block( let inherited_viability = parent_entry.as_ref() .and_then(|parent| parent.non_viable_ancestor_for_child()); - println!("writing block #{},{} parent={}", block_number, block_hash, parent_hash); - // 1. Add the block to the DB assuming it's not reverted. backend.write_block_entry( BlockEntry { From 1711f113ff8bcd5680e6f6e0585823cb6457d0c4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 19:30:43 +0100 Subject: [PATCH 60/79] determine-new-blocks: cleaner genesis avoidance and tighter ancestry requests --- .../src/determine_new_blocks.rs | 105 ++++++++++++++---- 1 file changed, 86 insertions(+), 19 deletions(-) diff --git a/node/subsystem-util/src/determine_new_blocks.rs b/node/subsystem-util/src/determine_new_blocks.rs index f77b598f50d8..63660b1cc036 100644 --- a/node/subsystem-util/src/determine_new_blocks.rs +++ b/node/subsystem-util/src/determine_new_blocks.rs @@ -43,11 +43,13 @@ pub async fn determine_new_blocks( ) -> Result, E> { const ANCESTRY_STEP: usize = 4; + let min_block_needed = lower_bound_number + 1; + // Early exit if the block is in the DB or too early. 
{ let already_known = is_known(&head)?; - let before_relevant = header.number <= lower_bound_number; + let before_relevant = header.number < min_block_needed; if already_known || before_relevant { return Ok(Vec::new()); @@ -56,8 +58,9 @@ pub async fn determine_new_blocks( let mut ancestry = vec![(head, header.clone())]; - // Early exit if the parent hash is in the DB. - if is_known(&header.parent_hash)? { + // Early exit if the parent hash is in the DB or no further blocks + // are needed. + if is_known(&header.parent_hash)? || header.number == min_block_needed { return Ok(ancestry); } @@ -65,15 +68,24 @@ pub async fn determine_new_blocks( let &(ref last_hash, ref last_header) = ancestry.last() .expect("ancestry has length 1 at initialization and is only added to; qed"); - // If we iterated back to genesis, which can happen at the beginning of chains. - if last_header.number <= 1 { - break 'outer - } + assert!( + last_header.number > min_block_needed, + "Loop invariant: the last block in ancestry is checked to be \ + above the minimum before the loop, and at the end of each iteration; \ + qed" + ); let (tx, rx) = oneshot::channel(); + + // This is always non-zero as determined by the loop invariant + // above. + let ancestry_step = std::cmp::min( + ANCESTRY_STEP, + (last_header.number - min_block_needed) as usize, + ); ctx.send_message(ChainApiMessage::Ancestors { hash: *last_hash, - k: ANCESTRY_STEP, + k: ancestry_step, response_channel: tx, }.into()).await; @@ -116,13 +128,18 @@ pub async fn determine_new_blocks( for (hash, header) in batch_hashes.into_iter().zip(batch_headers) { let is_known = is_known(&hash)?; - let is_relevant = header.number > lower_bound_number; + let is_relevant = header.number >= min_block_needed; + let is_terminating = header.number == min_block_needed; if is_known || !is_relevant { break 'outer } ancestry.push((hash, header)); + + if is_terminating { + break 'outer + } } } @@ -299,20 +316,17 @@ mod tests { response_channel: tx, }) => { assert_eq!(h, chain.hash_by_number(14).unwrap()); - assert_eq!(k, 4); + assert_eq!(k, 1); let _ = tx.send(Ok(chain.ancestry(&h, k as _))); } ); - for _ in 0..4 { - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { - let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); - } - ); - } - + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); + } + ); }); futures::executor::block_on(futures::future::join(test_fut, aux_fut)); @@ -514,4 +528,57 @@ mod tests { futures::executor::block_on(test_fut); } + + #[test] + fn determine_new_blocks_does_not_request_genesis() { + let pool = TaskExecutor::new(); + let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); + + let chain = TestChain::new(1, 2); + + let head = chain.header_by_number(2).unwrap().clone(); + let head_hash = head.hash(); + let known = TestKnownBlocks::default(); + + let expected_ancestry = (1..=2) + .map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap()) + .rev() + .collect::>(); + + let test_fut = Box::pin(async move { + let ancestry = determine_new_blocks( + ctx.sender(), + |h| known.is_known(h), + head_hash, + &head, + 0, + ).await.unwrap(); + + assert_eq!(ancestry, expected_ancestry); + }); + + let aux_fut = Box::pin(async move { + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::Ancestors { + hash: h, + k, + 
response_channel: tx, + }) => { + assert_eq!(h, head_hash); + assert_eq!(k, 1); + let _ = tx.send(Ok(chain.ancestry(&h, k as _))); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); + } + ); + }); + + futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + } } From 0586dbc6ccf209d1602fa86763f4ea39690c1efa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 20:03:03 +0100 Subject: [PATCH 61/79] don't make ancestry requests when asking for one block --- .../src/determine_new_blocks.rs | 97 +++++++++++++------ 1 file changed, 67 insertions(+), 30 deletions(-) diff --git a/node/subsystem-util/src/determine_new_blocks.rs b/node/subsystem-util/src/determine_new_blocks.rs index 63660b1cc036..adfc614beef9 100644 --- a/node/subsystem-util/src/determine_new_blocks.rs +++ b/node/subsystem-util/src/determine_new_blocks.rs @@ -83,16 +83,21 @@ pub async fn determine_new_blocks( ANCESTRY_STEP, (last_header.number - min_block_needed) as usize, ); - ctx.send_message(ChainApiMessage::Ancestors { - hash: *last_hash, - k: ancestry_step, - response_channel: tx, - }.into()).await; - - // Continue past these errors. - let batch_hashes = match rx.await { - Err(_) | Ok(Err(_)) => break 'outer, - Ok(Ok(ancestors)) => ancestors, + + let batch_hashes = if ancestry_step == 1 { + vec![last_header.parent_hash] + } else { + ctx.send_message(ChainApiMessage::Ancestors { + hash: *last_hash, + k: ancestry_step, + response_channel: tx, + }.into()).await; + + // Continue past these errors. + match rx.await { + Err(_) | Ok(Err(_)) => break 'outer, + Ok(Ok(ancestors)) => ancestors, + } }; let batch_headers = { @@ -308,22 +313,10 @@ mod tests { ); } - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::Ancestors { - hash: h, - k, - response_channel: tx, - }) => { - assert_eq!(h, chain.hash_by_number(14).unwrap()); - assert_eq!(k, 1); - let _ = tx.send(Ok(chain.ancestry(&h, k as _))); - } - ); - assert_matches!( handle.recv().await, AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + assert_eq!(h, chain.hash_by_number(13).unwrap()); let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); } ); @@ -557,6 +550,47 @@ mod tests { assert_eq!(ancestry, expected_ancestry); }); + let aux_fut = Box::pin(async move { + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + assert_eq!(h, chain.hash_by_number(1).unwrap()); + let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); + } + ); + }); + + futures::executor::block_on(futures::future::join(test_fut, aux_fut)); + } + + #[test] + fn determine_new_blocks_does_not_request_genesis_even_in_multi_ancestry() { + let pool = TaskExecutor::new(); + let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone()); + + let chain = TestChain::new(1, 3); + + let head = chain.header_by_number(3).unwrap().clone(); + let head_hash = head.hash(); + let known = TestKnownBlocks::default(); + + let expected_ancestry = (1..=3) + .map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap()) + .rev() + .collect::>(); + + let test_fut = Box::pin(async move { + let ancestry = determine_new_blocks( + ctx.sender(), + |h| known.is_known(h), + head_hash, + &head, + 0, + ).await.unwrap(); + + assert_eq!(ancestry, expected_ancestry); + }); + let aux_fut = Box::pin(async move { assert_matches!( 
handle.recv().await, @@ -566,17 +600,20 @@ mod tests { response_channel: tx, }) => { assert_eq!(h, head_hash); - assert_eq!(k, 1); + assert_eq!(k, 2); + let _ = tx.send(Ok(chain.ancestry(&h, k as _))); } ); - assert_matches!( - handle.recv().await, - AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { - let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); - } - ); + for _ in 0..2 { + assert_matches!( + handle.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone()))); + } + ); + } }); futures::executor::block_on(futures::future::join(test_fut, aux_fut)); From d73d62180207c4526d52855c4b8a7de950244698 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 20:15:54 +0100 Subject: [PATCH 62/79] add a couple more tests --- node/core/chain-selection/src/lib.rs | 9 +- node/core/chain-selection/src/tests.rs | 119 ++++++++++++++++++++++++- node/core/chain-selection/src/tree.rs | 5 +- 3 files changed, 125 insertions(+), 8 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 3ba04a94dcb2..291d7ec0d467 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -100,11 +100,10 @@ struct LeafEntry { impl PartialOrd for LeafEntry { fn partial_cmp(&self, other: &Self) -> Option { - if self.weight == other.weight { - self.block_number.partial_cmp(&other.block_number) - } else { - self.weight.partial_cmp(&other.weight) - } + let ord = self.weight.cmp(&other.weight) + .then(self.block_number.cmp(&other.block_number)); + + if ord.is_ne() { Some(ord) } else { None } } } diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index d880914b4391..b1ad3107a031 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -309,6 +309,14 @@ async fn import_blocks_into( } } +fn extract_info_from_chain(i: usize, chain: &[(Header, BlockWeight)]) + -> (BlockNumber, Hash, BlockWeight) +{ + let &(ref header, weight) = &chain[i]; + + (header.number, header.hash(), weight) +} + fn assert_backend_contains<'a>( backend: &TestBackend, headers: impl IntoIterator, @@ -415,7 +423,7 @@ fn import_two_subtrees_on_finalized() { vec![2], finalized_number, finalized_hash, - |h| salt_header(h, b"a"), + |h| salt_header(h, b"b"), ); import_blocks_into( @@ -441,6 +449,115 @@ fn import_two_subtrees_on_finalized() { }) } +#[test] +fn import_two_subtrees_on_nonzero_finalized() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 100; + let finalized_hash = Hash::repeat_byte(0); + + let (a_hash, chain_a) = construct_chain_on_base( + vec![1], + finalized_number, + finalized_hash, + |_| {} + ); + + let (b_hash, chain_b) = construct_chain_on_base( + vec![2], + finalized_number, + finalized_hash, + |h| salt_header(h, b"b"), + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 101); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b_hash, a_hash]); + + virtual_overseer + }) +} + +#[test] +fn leaves_ordered_by_weight_and_then_number() { + 
test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // F <- C1 <- C2 + // + // expected_leaves: [(C2, 3), (A3, 2), (B2, 2)] + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 1, 2], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![2], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + let (c2_hash, chain_c) = construct_chain_on_base( + vec![1, 3], + finalized_number, + finalized_hash, + |h| salt_header(h, b"c"), + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_c.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_c.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![c2_hash, a3_hash, b2_hash]); + virtual_overseer + }); +} + // TODO [now]: importing a block with reversion // TODO [now]: finalize a viable block diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 86c67d6b48eb..53893f87ac94 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -331,8 +331,9 @@ fn add_block( } ); - // 2. Update leaves if parent was a viable leaf or the parent is unknown. - if leaves.remove(&parent_hash) || parent_entry.is_none() { + // 2. Update leaves if inherited viability is fine. 
+ if inherited_viability.is_none() { + leaves.remove(&parent_hash); leaves.insert(LeafEntry { block_hash, block_number, weight }); backend.write_leaves(leaves); } From 2dd5358ac320d91b90640676138b4a4e5ce73d0e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 20:17:47 +0100 Subject: [PATCH 63/79] add to AllMessages in guide --- roadmap/implementers-guide/src/types/overseer-protocol.md | 1 + 1 file changed, 1 insertion(+) diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 77e8bd862653..70e214a79492 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -70,6 +70,7 @@ enum AllMessages { GossipSupport(GossipSupportMessage), DisputeCoordinator(DisputeCoordinatorMessage), DisputeParticipation(DisputeParticipationMessage), + ChainSelection(ChainSelectionMessage), } ``` From f42230cb05176805937c057042e5528f43688103 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 20:19:11 +0100 Subject: [PATCH 64/79] remove bad spaces from bridge --- node/network/bridge/src/tests.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/node/network/bridge/src/tests.rs b/node/network/bridge/src/tests.rs index eb741654f3c3..48296fb94f05 100644 --- a/node/network/bridge/src/tests.rs +++ b/node/network/bridge/src/tests.rs @@ -1280,11 +1280,11 @@ fn spread_event_to_subsystems_is_up_to_date() { AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"), AllMessages::DisputeParticipation(_) => unreachable!("Not interetsed in network events"), AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"), - // Add variants here as needed, `{ cnt += 1; }` for those that need to be - // notified, `unreachable!()` for those that should not. - } - } - assert_eq!(cnt, EXPECTED_COUNT); + // Add variants here as needed, `{ cnt += 1; }` for those that need to be + // notified, `unreachable!()` for those that should not. + } + } + assert_eq!(cnt, EXPECTED_COUNT); } #[test] From 99b4392e047ec2962679a2d926c58382c391b086 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 20:20:05 +0100 Subject: [PATCH 65/79] compact iterator --- node/core/chain-selection/src/tree.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs index 53893f87ac94..a10f0d0c5ad5 100644 --- a/node/core/chain-selection/src/tree.rs +++ b/node/core/chain-selection/src/tree.rs @@ -281,7 +281,7 @@ fn load_ancestor( let mut current_entry = None; let segment_length = (block_number - ancestor_number) + 1; - for _ in std::iter::repeat(()).take(segment_length as usize) { + for _ in 0..segment_length { match backend.load_block_entry(¤t_hash)? { None => return Ok(None), Some(entry) => { From 5afd66a906f8e50fa48478d0fcf8e5e4df0f9b1c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 22:25:47 +0100 Subject: [PATCH 66/79] test import with gaps --- node/core/chain-selection/src/tests.rs | 170 +++++++++++++++++++++++++ 1 file changed, 170 insertions(+) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index b1ad3107a031..29caa38ac99e 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -309,6 +309,126 @@ async fn import_blocks_into( } } +// Import blocks all at once. 
This assumes that the ancestor is known/finalized +// but none of the other blocks. +// If `finalized_base` is supplied, +// it will be answered before the first block. +// +// some pre-blocks may need to be supplied to answer ancestry requests +// that gather batches beyond the beginning of the new chain. +// pre-blocks are those already known by the subsystem; however, +// the subsystem has no way of knowing that until requesting ancestry. +async fn import_all_blocks_into( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + finalized_base: Option<(BlockNumber, Hash)>, + pre_blocks: Vec<Header>
, + blocks: Vec<(Header, BlockWeight)>, +) { + assert!(blocks.len() > 1, "gap only makes sense if importing multiple blocks"); + + let head = blocks.last().unwrap().0.clone(); + let head_hash = head.hash(); + let head_parent_hash = head.parent_hash; + + let (_, write_rx) = backend.await_next_write(); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash: head_hash, + number: head.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + if let Some((f_n, f_h)) = finalized_base { + answer_finalized_block_info(virtual_overseer, f_n, f_h).await; + } + + // Head is always fetched first. + answer_header_request(virtual_overseer, head).await; + + // Answer header and ancestry requests until the parent of head + // is imported. + { + let find_block_header = |expected_hash| { + pre_blocks.iter().cloned() + .chain(blocks.iter().map(|(h, _)| h.clone())) + .find(|hdr| hdr.hash() == expected_hash) + .unwrap() + }; + + let mut behind_head = 0; + loop { + let nth_ancestor_of_head = |n: usize| { + // blocks: [d, e, f, head] + // pre: [a, b, c] + // + // [a, b, c, d, e, f, head] + // [6, 5, 4, 3, 2, 1, 0] + + let new_ancestry_end = blocks.len() - 1; + if n > new_ancestry_end { + // [6, 5, 4] -> [2, 1, 0] + let n_in_pre = n - blocks.len(); + let pre_blocks_end = pre_blocks.len() - 1; + pre_blocks[pre_blocks_end - n_in_pre].clone() + } else { + let blocks_end = blocks.len() - 1; + blocks[blocks_end - n].0.clone() + } + }; + + match virtual_overseer.recv().await { + AllMessages::ChainApi(ChainApiMessage::Ancestors { + hash: h, + k, + response_channel: tx, + }) => { + let prev_response = nth_ancestor_of_head(behind_head); + assert_eq!(h, prev_response.hash()); + + let _ = tx.send(Ok( + (0..k as usize).map(|n| n + behind_head + 1) + .map(nth_ancestor_of_head) + .map(|h| h.hash()) + .collect() + )); + + for _ in 0..k { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let header = find_block_header(h); + let _ = tx.send(Ok(Some(header))); + } + ) + } + + behind_head = behind_head + k as usize; + } + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let header = find_block_header(h); + let _ = tx.send(Ok(Some(header))); + + // Assuming that `determine_new_blocks` uses these + // instead of ancestry: 1. + behind_head += 1; + } + AllMessages::ChainApi(ChainApiMessage::BlockWeight(h, tx)) => { + let (_, weight) = blocks.iter().find(|(hdr, _)| hdr.hash() == h).unwrap(); + let _ = tx.send(Ok(Some(*weight))); + + // Last weight has been returned. Time to go. 
+ if h == head_hash { break } + } + _ => panic!("unexpected message"), + } + } + } + write_rx.await.unwrap(); +} + fn extract_info_from_chain(i: usize, chain: &[(Header, BlockWeight)]) -> (BlockNumber, Hash, BlockWeight) { @@ -558,6 +678,56 @@ fn leaves_ordered_by_weight_and_then_number() { }); } +#[test] +fn subtrees_imported_even_with_gaps() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A2 <- B3 <- B4 <- B5 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b5_hash, chain_b) = construct_chain_on_base( + vec![4, 4, 5], + 2, + a2_hash, + |h| salt_header(h, b"b"), + ); + + import_all_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + Vec::new(), + chain_a.clone(), + ).await; + + import_all_blocks_into( + &mut virtual_overseer, + &backend, + None, + vec![chain_a[0].0.clone(), chain_a[1].0.clone()], + chain_b.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b5_hash, a3_hash]); + + virtual_overseer + }); +} + // TODO [now]: importing a block with reversion // TODO [now]: finalize a viable block From bf15a67ae6d4c02fbbad64fb30932b9e649fcf36 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 22:59:26 +0100 Subject: [PATCH 67/79] more reversion tests --- node/core/chain-selection/src/tests.rs | 265 ++++++++++++++++++++++++- 1 file changed, 261 insertions(+), 4 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 29caa38ac99e..24a461d94457 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -30,7 +30,7 @@ use parking_lot::Mutex; use sp_core::testing::TaskExecutor; use assert_matches::assert_matches; -use polkadot_primitives::v1::{BlakeTwo256, HashT}; +use polkadot_primitives::v1::{BlakeTwo256, HashT, ConsensusLog}; use polkadot_subsystem::{jaeger, ActiveLeavesUpdate, ActivatedLeaf, LeafStatus}; use polkadot_subsystem::messages::AllMessages; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -134,6 +134,7 @@ impl Backend for TestBackend { where I: IntoIterator { let mut inner = self.inner.lock(); + for op in ops { match op { BackendWriteOp::WriteBlockEntry(entry) => { @@ -253,6 +254,15 @@ fn salt_header(header: &mut Header, salt: impl Encode) { header.state_root = BlakeTwo256::hash_of(&salt) } +fn add_reversions( + header: &mut Header, + reversions: impl IntoIterator, +) { + for log in reversions.into_iter().map(ConsensusLog::Revert) { + header.digest.logs.push(log.into()) + } +} + // Builds a chain on top of the given base, with one block for each // provided weight. 
fn construct_chain_on_base( @@ -329,7 +339,6 @@ async fn import_all_blocks_into( let head = blocks.last().unwrap().0.clone(); let head_hash = head.hash(); - let head_parent_hash = head.parent_hash; let (_, write_rx) = backend.await_next_write(); virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( @@ -654,7 +663,6 @@ fn leaves_ordered_by_weight_and_then_number() { chain_a.clone(), ).await; - import_blocks_into( &mut virtual_overseer, &backend, @@ -728,7 +736,256 @@ fn subtrees_imported_even_with_gaps() { }); } -// TODO [now]: importing a block with reversion +#[test] +fn reversion_removes_viability_of_chain() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts A1 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, Some(1)) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![]); + + virtual_overseer + }); +} + +#[test] +fn reversion_removes_viability_and_finds_ancestor_as_leaf() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts A2 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, Some(2)) } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a1_hash]); + + virtual_overseer + }); +} + +#[test] +fn ancestor_of_unviable_is_not_leaf_if_has_children() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. 
+ // A1 <- B2 + // + // A3 reverts A2 + + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (a3_hash, chain_a_ext) = construct_chain_on_base( + vec![3], + 2, + a2_hash, + |h| add_reversions(h, Some(2)), + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b") + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a2_hash, b2_hash]); + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_a_ext.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a_ext.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b2_hash]); + + virtual_overseer + }); +} + +#[test] +fn self_and_future_reversions_are_ignored() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts itself and future blocks. ignored. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, vec![3, 4, 100]) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a3_hash]); + + virtual_overseer + }); +} + +#[test] +fn revert_finalized_is_ignored() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 10; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts itself and future blocks. ignored. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 13 { add_reversions(h, vec![10, 9, 8, 0, 1]) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a3_hash]); + + virtual_overseer + }); +} + +#[test] +fn reversion_affects_viability_of_all_subtrees() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // A2 <- B3 <- B4 + // + // B4 reverts A2. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b4_hash, chain_b) = construct_chain_on_base( + vec![3, 4], + 2, + a2_hash, + |h| { + salt_header(h, b"b"); + if h.number == 4 { + add_reversions(h, Some(2)); + } + } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_leaves(&backend, vec![a3_hash]); + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a1_hash]); + + virtual_overseer + }); +} // TODO [now]: finalize a viable block // TODO [now]: finalize an unviable block with viable descendants From 1aabcb33de0b4b59c8d849af718994bb15b37584 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 23:22:28 +0100 Subject: [PATCH 68/79] test finalization pruning subtrees --- node/core/chain-selection/src/tests.rs | 175 +++++++++++++++++++++---- 1 file changed, 151 insertions(+), 24 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 24a461d94457..aff567aedf4e 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -319,6 +319,24 @@ async fn import_blocks_into( } } +async fn import_chains_into_empty( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + finalized_number: BlockNumber, + finalized_hash: Hash, + chains: Vec>, +) { + for (i, chain)in chains.into_iter().enumerate() { + let finalized_base = Some((finalized_number, finalized_hash)).filter(|_| i == 0); + import_blocks_into( + virtual_overseer, + backend, + finalized_base, + chain, + ).await; + } +} + // Import blocks all at once. This assumes that the ancestor is known/finalized // but none of the other blocks. // import blocks 1-by-1. 
If `finalized_base` is supplied, @@ -438,6 +456,21 @@ async fn import_all_blocks_into( write_rx.await.unwrap(); } +async fn finalize_block( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + block_number: BlockNumber, + block_hash: Hash, +) { + let (_, write_tx) = backend.await_next_write(); + + virtual_overseer.send( + OverseerSignal::BlockFinalized(block_hash, block_number).into() + ).await; + + write_tx.await.unwrap(); +} + fn extract_info_from_chain(i: usize, chain: &[(Header, BlockWeight)]) -> (BlockNumber, Hash, BlockWeight) { @@ -466,6 +499,18 @@ fn assert_backend_contains<'a>( } } +fn assert_backend_contains_chains( + backend: &TestBackend, + chains: Vec>, +) { + for chain in chains { + assert_backend_contains( + backend, + chain.iter().map(|&(ref hdr, _)| hdr) + ) + } +} + fn assert_leaves( backend: &TestBackend, leaves: Vec, @@ -656,25 +701,12 @@ fn leaves_ordered_by_weight_and_then_number() { |h| salt_header(h, b"c"), ); - import_blocks_into( - &mut virtual_overseer, - &backend, - Some((finalized_number, finalized_hash)), - chain_a.clone(), - ).await; - - import_blocks_into( - &mut virtual_overseer, - &backend, - None, - chain_b.clone(), - ).await; - - import_blocks_into( + import_chains_into_empty( &mut virtual_overseer, &backend, - None, - chain_c.clone(), + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], ).await; assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); @@ -987,20 +1019,115 @@ fn reversion_affects_viability_of_all_subtrees() { }); } -// TODO [now]: finalize a viable block +#[test] +fn finalize_viable_prunes_subtrees() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // A2 <- X3 + // F <- A1 <- A2 <- A3. + // A1 <- B2 + // F <- C1 <- C2 <- C3 + // C2 <- D3 + // + // Finalize A2. Only A2, A3, and X3 should remain. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 10], + finalized_number, + finalized_hash, + |h| salt_header(h, b"a"), + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (x3_hash, chain_x) = construct_chain_on_base( + vec![3], + 2, + a2_hash, + |h| salt_header(h, b"x"), + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![6], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![1, 2, 8], + finalized_number, + finalized_hash, + |h| salt_header(h, b"c"), + ); + let (_, c2_hash, _) = extract_info_from_chain(1, &chain_c); + + let (d3_hash, chain_d) = construct_chain_on_base( + vec![7], + 2, + c2_hash, + |h| salt_header(h, b"d"), + ); + + let all_chains = vec![ + chain_a.clone(), + chain_x.clone(), + chain_b.clone(), + chain_c.clone(), + chain_d.clone(), + ]; + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + all_chains.clone(), + ).await; + + assert_backend_contains_chains( + &backend, + all_chains.clone(), + ); + assert_leaves(&backend, vec![a3_hash, c3_hash, d3_hash, b2_hash, x3_hash]); + + // Finalize block A2. Now lots of blocks should go missing. 
+ finalize_block( + &mut virtual_overseer, + &backend, + 2, + a2_hash, + ).await; + + + // A2 <- A3 + // A2 <- X3 + + assert_leaves(&backend, vec![a3_hash, x3_hash]); + assert_eq!( + backend.load_first_block_number().unwrap().unwrap(), + 3, + ); + + assert_eq!( + backend.load_blocks_by_number(3).unwrap(), + vec![a3_hash, x3_hash], + ); + + virtual_overseer + }); +} + // TODO [now]: finalize an unviable block with viable descendants // TODO [now]: finalize an unviable block with unviable descendants down the line -// TODO [now]: mark blocks as stagnant. -// TODO [now]: approve stagnant block with unviable descendant. - // TODO [now]; test find best leaf containing with no leaves. // TODO [now]: find best leaf containing when required is finalized // TODO [now]: find best leaf containing when required is unfinalized. // TODO [now]: find best leaf containing when required is ancestor of many leaves. -// TODO [now]: leaf tiebreakers are based on height. - -// TODO [now]: test assumption that each active leaf update gives 1 DB write. // TODO [now]: test assumption that each approved block gives 1 DB write. // TODO [now]: test assumption that each finalized block gives 1 DB write. From 8dff99909479525dfd6c94e0462aa06362155f8b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 23:33:11 +0100 Subject: [PATCH 69/79] fixups --- node/core/chain-selection/src/tests.rs | 33 ++++---------------------- 1 file changed, 4 insertions(+), 29 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index aff567aedf4e..44d630e5bd50 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -73,31 +73,6 @@ impl TestBackend { (pos, rx) } - - // return a receiver, expecting its position to be the given one. - fn await_next_write_expecting(&self, expected_pos: usize) -> oneshot::Receiver<()> { - let (pos, rx) = self.await_next_write(); - assert_eq!(pos, expected_pos); - - rx - } - - // return a receiver that will wake up after n other receivers, - // inserting receivers as necessary. - // - // panics if there are already more than n receivers. 
- fn await_nth_write(&self, n: usize) -> oneshot::Receiver<()> { - assert_ne!(n, 0, "invalid parameter 0"); - let expected_pos = n - 1; - - loop { - let (pos, rx) = self.await_next_write(); - assert!(pos <= expected_pos, "pending awaits {} > {}", pos, expected_pos); - if pos == expected_pos { - break rx; - } - } - } } impl Default for TestBackend { @@ -778,7 +753,7 @@ fn reversion_removes_viability_of_chain() { // // A3 reverts A1 - let (a3_hash, chain_a) = construct_chain_on_base( + let (_a3_hash, chain_a) = construct_chain_on_base( vec![1, 2, 3], finalized_number, finalized_hash, @@ -809,7 +784,7 @@ fn reversion_removes_viability_and_finds_ancestor_as_leaf() { // // A3 reverts A2 - let (a3_hash, chain_a) = construct_chain_on_base( + let (_a3_hash, chain_a) = construct_chain_on_base( vec![1, 2, 3], finalized_number, finalized_hash, @@ -852,7 +827,7 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() { let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); - let (a3_hash, chain_a_ext) = construct_chain_on_base( + let (_a3_hash, chain_a_ext) = construct_chain_on_base( vec![3], 2, a2_hash, @@ -983,7 +958,7 @@ fn reversion_affects_viability_of_all_subtrees() { let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); - let (b4_hash, chain_b) = construct_chain_on_base( + let (_b4_hash, chain_b) = construct_chain_on_base( vec![3, 4], 2, a2_hash, From 16c26e8a7518c305be5f7f8fa29537633629a0f4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 19 Jun 2021 23:55:53 +0100 Subject: [PATCH 70/79] test clobbering and fix bug in overlay --- node/core/chain-selection/src/backend.rs | 2 +- node/core/chain-selection/src/tests.rs | 46 ++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index bc0a3c6ca563..af2697f3a4df 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -125,7 +125,7 @@ impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { } pub(super) fn delete_block_entry(&mut self, hash: &Hash) { - self.block_entries.remove(hash); + self.block_entries.insert(*hash, None); } pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 44d630e5bd50..513a25074b47 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -1096,6 +1096,52 @@ fn finalize_viable_prunes_subtrees() { }); } +#[test] +fn finalization_does_not_clobber_unviability() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A3 reverts A2. + // Finalize A1. 
+ + let (_a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 10], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + let a2_onwards = chain_a[1..].to_vec(); + assert_leaves(&backend, vec![]); + assert_backend_contains_chains(&backend, vec![a2_onwards]); + + virtual_overseer + }); +} + // TODO [now]: finalize an unviable block with viable descendants // TODO [now]: finalize an unviable block with unviable descendants down the line From 2b075b1193794230d10002f29ce65c83387db516 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 00:04:59 +0100 Subject: [PATCH 71/79] exhaustive backend state after finalizaiton tested --- node/core/chain-selection/src/tests.rs | 48 +++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 513a25074b47..5cb0ea8263ab 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -21,7 +21,7 @@ //! test code the ability to wait for write operations to occur. use super::*; -use std::collections::{HashMap, BTreeMap}; +use std::collections::{HashMap, HashSet, BTreeMap}; use std::sync::Arc; use futures::channel::oneshot; @@ -73,6 +73,36 @@ impl TestBackend { (pos, rx) } + + // Assert the backend contains only the given blocks and no others. + // This does not check the stagnant_at mapping because that is + // pruned lazily by the subsystem as opposed to eagerly. + fn assert_contains_only( + &self, + blocks: Vec<(BlockNumber, Hash)>, + ) { + let hashes: Vec<_> = blocks.iter().map(|(_, h)| *h).collect(); + let mut by_number: HashMap<_, HashSet<_>> = HashMap::new(); + + for (number, hash) in blocks { + by_number.entry(number).or_default().insert(hash); + } + + let inner = self.inner.lock(); + assert_eq!(inner.block_entries.len(), hashes.len()); + assert_eq!(inner.blocks_by_number.len(), by_number.len()); + + for leaf in inner.leaves.clone().into_hashes_descending() { + assert!(hashes.contains(&leaf)); + } + + for (number, hashes_at_number) in by_number { + let at = inner.blocks_by_number.get(&number).unwrap(); + for hash in at { + assert!(hashes_at_number.contains(&hash)); + } + } + } } impl Default for TestBackend { @@ -1077,10 +1107,14 @@ fn finalize_viable_prunes_subtrees() { a2_hash, ).await; - // A2 <- A3 // A2 <- X3 + backend.assert_contains_only(vec![ + (3, a3_hash), + (3, x3_hash), + ]); + assert_leaves(&backend, vec![a3_hash, x3_hash]); assert_eq!( backend.load_first_block_number().unwrap().unwrap(), @@ -1106,7 +1140,7 @@ fn finalization_does_not_clobber_unviability() { // A3 reverts A2. // Finalize A1. 
- let (_a3_hash, chain_a) = construct_chain_on_base( + let (a3_hash, chain_a) = construct_chain_on_base( vec![1, 2, 10], finalized_number, finalized_hash, @@ -1119,6 +1153,7 @@ fn finalization_does_not_clobber_unviability() { ); let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); import_blocks_into( &mut virtual_overseer, @@ -1134,9 +1169,12 @@ fn finalization_does_not_clobber_unviability() { a1_hash, ).await; - let a2_onwards = chain_a[1..].to_vec(); assert_leaves(&backend, vec![]); - assert_backend_contains_chains(&backend, vec![a2_onwards]); + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + ]); virtual_overseer }); From 5a7abf90c9d7bb8968bc7ffb524064fff0818f88 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 01:58:42 +0100 Subject: [PATCH 72/79] more finality tests --- node/core/chain-selection/src/tests.rs | 258 ++++++++++++++++++++++++- 1 file changed, 255 insertions(+), 3 deletions(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 5cb0ea8263ab..cbc0c08780bc 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -1180,8 +1180,261 @@ fn finalization_does_not_clobber_unviability() { }); } -// TODO [now]: finalize an unviable block with viable descendants -// TODO [now]: finalize an unviable block with unviable descendants down the line +#[test] +fn finalization_erases_unviable() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // + // A2 reverts A1. + // Finalize A1. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![a3_hash, b2_hash]); + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + (2, b2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_erases_unviable_but_keeps_later_unviability() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // + // A2 reverts A1. + // A3 reverts A2. + // Finalize A1. A2 is stil unviable, but B2 is viable. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + if h.number == 3 { + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![b2_hash]); + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + (2, b2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_erases_unviable_from_one_but_not_all_reverts() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // + // A3 reverts A2 and A1. + // Finalize A1. A2 is stil unviable. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(1)); + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![]); + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_triggers_viability_search() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A2 <- B3 + // A2 <- C3 + // A3 reverts A1. + // Finalize A1. A3, B3, and C3 are all viable now. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b3_hash, chain_b) = construct_chain_on_base( + vec![4], + 2, + a2_hash, + |h| salt_header(h, b"b"), + ); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![5], + 2, + a2_hash, + |h| salt_header(h, b"c"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![c3_hash, b3_hash, a3_hash]); + + backend.assert_contains_only(vec![ + (3, a3_hash), + (3, b3_hash), + (3, c3_hash), + (2, a2_hash), + ]); + + virtual_overseer + }); +} // TODO [now]; test find best leaf containing with no leaves. 
// TODO [now]: find best leaf containing when required is finalized @@ -1189,4 +1442,3 @@ fn finalization_does_not_clobber_unviability() { // TODO [now]: find best leaf containing when required is ancestor of many leaves. // TODO [now]: test assumption that each approved block gives 1 DB write. -// TODO [now]: test assumption that each finalized block gives 1 DB write. From a51298541253c3ecfc9d32e63362d40c4b7fa2b6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 04:28:04 +0100 Subject: [PATCH 73/79] leaf tests --- node/core/chain-selection/src/lib.rs | 5 + node/core/chain-selection/src/tests.rs | 310 ++++++++++++++++++++++++- 2 files changed, 310 insertions(+), 5 deletions(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 291d7ec0d467..727c3b4face1 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -353,6 +353,11 @@ async fn run_iteration(ctx: &mut Context, backend: &mut B) required, )?; + // note - this may be none if the finalized block is + // a leaf. this is fine according to the expected usage of the + // function. `None` responses should just `unwrap_or(required)`, + // so if the required block is the finalized block, then voilá. + let _ = tx.send(best_containing); } } diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index cbc0c08780bc..7fdb3081d55e 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -516,6 +516,7 @@ fn assert_backend_contains_chains( } } +// TODO [now]: check `ChainApiMessage::Leaves`. fn assert_leaves( backend: &TestBackend, leaves: Vec, @@ -523,7 +524,19 @@ fn assert_leaves( assert_eq!( backend.load_leaves().unwrap().into_hashes_descending().into_iter().collect::>(), leaves, - ) + ); +} + +async fn best_leaf_containing( + virtual_overseer: &mut VirtualOverseer, + required: Hash, +) -> Option { + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::BestLeafContaining(required, tx) + }).await; + + rx.await.unwrap() } #[test] @@ -1436,9 +1449,296 @@ fn finalize_triggers_viability_search() { }); } -// TODO [now]; test find best leaf containing with no leaves. -// TODO [now]: find best leaf containing when required is finalized -// TODO [now]: find best leaf containing when required is unfinalized. -// TODO [now]: find best leaf containing when required is ancestor of many leaves. +#[test] +fn best_leaf_none_with_empty_db() { + test_harness(|_backend, mut virtual_overseer| async move { + let required = Hash::repeat_byte(1); + let best_leaf = best_leaf_containing(&mut virtual_overseer, required).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_no_viable_leaves() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // + // A2 reverts A1. 
+ + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a2_hash).await; + assert!(best_leaf.is_none()); + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_unknown_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + + let (_a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let unknown_hash = Hash::repeat_byte(0x69); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, unknown_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_unviable_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // A2 reverts A1. + + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a2_hash).await; + assert!(best_leaf.is_none()); + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_with_finalized_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // B2 > A2 + + let (_a2_hash, chain_a) = construct_chain_on_base( + vec![1, 1], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, finalized_hash).await; + assert_eq!(best_leaf, Some(b2_hash)); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_with_unfinalized_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // B2 > A2 + + let (a2_hash, 
chain_a) = construct_chain_on_base( + vec![1, 1], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert_eq!(best_leaf, Some(a2_hash)); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_ancestor_of_all_leaves() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 <- B3 + // B2 <- C3 + // + // C3 > B3 > A3 + + let (_a3_hash, chain_a) = construct_chain_on_base( + vec![1, 1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b3_hash, chain_b) = construct_chain_on_base( + vec![2, 3], + 1, + a1_hash, + |h| { + salt_header(h, b"b"); + } + ); + + let (_, b2_hash, _) = extract_info_from_chain(0, &chain_b); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![4], + 2, + b2_hash, + |h| { + salt_header(h, b"c"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert_eq!(best_leaf, Some(c3_hash)); + + virtual_overseer + }) +} + +#[test] +fn approve_message_approves_block_entry() { + +} // TODO [now]: test assumption that each approved block gives 1 DB write. 
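
Editor's note: the leaf tests added in the patch above exercise the `BestLeafContaining` semantics — leaves are kept in descending (weight, block number) order, the answer is the first leaf whose unfinalized ancestry includes the required block, and unknown or unviable targets yield `None`. The standalone sketch below illustrates only that lookup shape. It is not the subsystem's implementation: viability filtering is omitted, and the `Chains` store, `parent_of` map, and `ancestry_contains` helper are assumptions made purely for illustration.

```rust
use std::collections::HashMap;

type Hash = [u8; 32];

struct Leaf {
    weight: u64,
    number: u32,
    hash: Hash,
}

// Hypothetical flat view of the unfinalized blocks: child -> parent.
struct Chains {
    parent_of: HashMap<Hash, Hash>,
    // Leaves pre-sorted by (weight, number) descending, matching the
    // order the tests above expect leaf queries to return.
    leaves: Vec<Leaf>,
}

impl Chains {
    // Walk from `head` towards the finalized chain, checking whether
    // `required` appears in the ancestry (including `head` itself).
    fn ancestry_contains(&self, mut head: Hash, required: Hash) -> bool {
        loop {
            if head == required {
                return true;
            }
            match self.parent_of.get(&head) {
                Some(parent) => head = *parent,
                None => return false,
            }
        }
    }

    // First (i.e. best) leaf whose ancestry contains `required`;
    // `None` if `required` is unknown or not under any leaf.
    fn best_leaf_containing(&self, required: Hash) -> Option<Hash> {
        self.leaves
            .iter()
            .find(|leaf| self.ancestry_contains(leaf.hash, required))
            .map(|leaf| leaf.hash)
    }
}

fn main() {
    let a1 = [1u8; 32];
    let a2 = [2u8; 32];
    let b2 = [3u8; 32];

    let chains = Chains {
        parent_of: [(a2, a1), (b2, a1)].into_iter().collect(),
        leaves: vec![
            Leaf { weight: 2, number: 2, hash: b2 },
            Leaf { weight: 1, number: 2, hash: a2 },
        ],
    };

    // A1 is an ancestor of both leaves; the heavier leaf, B2, wins.
    assert_eq!(chains.best_leaf_containing(a1), Some(b2));
    // An unknown block yields `None`.
    assert_eq!(chains.best_leaf_containing([9u8; 32]), None);
}
```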
From cb2d1364f7f2112771d8f3ba1b795e5de620ea26 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 04:34:09 +0100 Subject: [PATCH 74/79] test approval --- node/core/chain-selection/src/tests.rs | 109 ++++++++++++++++++++++++- 1 file changed, 108 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 7fdb3081d55e..d83b2eb3d99e 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -539,6 +539,19 @@ async fn best_leaf_containing( rx.await.unwrap() } +async fn approve_block( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + approved: Hash, +) { + let (_, write_rx) = backend.await_next_write(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Approved(approved) + }).await; + + write_rx.await.unwrap() +} + #[test] fn no_op_subsystem_run() { test_harness(|_, virtual_overseer| async move { virtual_overseer }); @@ -1738,7 +1751,101 @@ fn best_leaf_ancestor_of_all_leaves() { #[test] fn approve_message_approves_block_entry() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + approve_block(&mut virtual_overseer, &backend, a3_hash).await; + + // a3 is approved, but not a1 or a2. + assert_matches!( + backend.load_block_entry(&a3_hash).unwrap().unwrap().viability.approval, + Approval::Approved + ); + + assert_matches!( + backend.load_block_entry(&a2_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a1_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + virtual_overseer + }) } -// TODO [now]: test assumption that each approved block gives 1 DB write. +#[test] +fn approve_nonexistent_has_no_effect() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let nonexistent = Hash::repeat_byte(1); + approve_block(&mut virtual_overseer, &backend, nonexistent).await; + + // a3 is approved, but not a1 or a2. 
+ assert_matches!( + backend.load_block_entry(&a3_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a2_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a1_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + virtual_overseer + }) +} From 4e01714c06852d0ecb5d485c70b29287312e8447 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 04:49:19 +0100 Subject: [PATCH 75/79] test ChainSelectionMessage::Leaves thoroughly --- node/core/chain-selection/src/tests.rs | 61 +++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index d83b2eb3d99e..9e6f0dd7635f 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -527,6 +527,35 @@ fn assert_leaves( ); } +async fn assert_leaves_query( + virtual_overseer: &mut VirtualOverseer, + leaves: Vec, +) { + assert!(!leaves.is_empty(), "empty leaves impossible. answer finalized query"); + + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Leaves(tx) + }).await; + + assert_eq!(rx.await.unwrap(), leaves); +} + +async fn assert_finalized_leaves_query( + virtual_overseer: &mut VirtualOverseer, + finalized_number: BlockNumber, + finalized_hash: Hash, +) { + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Leaves(tx) + }).await; + + answer_finalized_block_info(virtual_overseer, finalized_number, finalized_hash).await; + + assert_eq!(rx.await.unwrap(), vec![finalized_hash]); +} + async fn best_leaf_containing( virtual_overseer: &mut VirtualOverseer, required: Hash, @@ -578,6 +607,7 @@ fn import_direct_child_of_finalized_on_empty() { assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); assert_backend_contains(&backend, &[child]); assert_leaves(&backend, vec![child_hash]); + assert_leaves_query(&mut virtual_overseer, vec![child_hash]).await; virtual_overseer }) @@ -606,6 +636,7 @@ fn import_chain_on_finalized_incrementally() { assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![head_hash]); + assert_leaves_query(&mut virtual_overseer, vec![head_hash]).await; virtual_overseer }) @@ -649,6 +680,7 @@ fn import_two_subtrees_on_finalized() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![b_hash, a_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; virtual_overseer }) @@ -692,6 +724,7 @@ fn import_two_subtrees_on_nonzero_finalized() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![b_hash, a_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; virtual_overseer }) @@ -745,6 +778,7 @@ fn leaves_ordered_by_weight_and_then_number() { assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_c.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![c2_hash, a3_hash, b2_hash]); + assert_leaves_query(&mut 
virtual_overseer, vec![c2_hash, a3_hash, b2_hash]).await; virtual_overseer }); } @@ -794,6 +828,7 @@ fn subtrees_imported_even_with_gaps() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![b5_hash, a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b5_hash, a3_hash]).await; virtual_overseer }); @@ -825,6 +860,11 @@ fn reversion_removes_viability_of_chain() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![]); + assert_finalized_leaves_query( + &mut virtual_overseer, + finalized_number, + finalized_hash, + ).await; virtual_overseer }); @@ -858,6 +898,7 @@ fn reversion_removes_viability_and_finds_ancestor_as_leaf() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![a1_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; virtual_overseer }); @@ -926,6 +967,7 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() { assert_backend_contains(&backend, chain_a_ext.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b2_hash]).await; virtual_overseer }); @@ -957,6 +999,7 @@ fn self_and_future_reversions_are_ignored() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash]).await; virtual_overseer }); @@ -988,6 +1031,7 @@ fn revert_finalized_is_ignored() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash]).await; virtual_overseer }); @@ -1045,6 +1089,7 @@ fn reversion_affects_viability_of_all_subtrees() { assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); assert_leaves(&backend, vec![a1_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; virtual_overseer }); @@ -1142,6 +1187,8 @@ fn finalize_viable_prunes_subtrees() { ]); assert_leaves(&backend, vec![a3_hash, x3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash, x3_hash]).await; + assert_eq!( backend.load_first_block_number().unwrap().unwrap(), 3, @@ -1196,7 +1243,11 @@ fn finalization_does_not_clobber_unviability() { ).await; assert_leaves(&backend, vec![]); - + assert_finalized_leaves_query( + &mut virtual_overseer, + 1, + a1_hash, + ).await; backend.assert_contains_only(vec![ (3, a3_hash), (2, a2_hash), @@ -1258,6 +1309,7 @@ fn finalization_erases_unviable() { ).await; assert_leaves(&backend, vec![a3_hash, b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash, b2_hash]).await; backend.assert_contains_only(vec![ (3, a3_hash), @@ -1325,6 +1377,7 @@ fn finalize_erases_unviable_but_keeps_later_unviability() { ).await; assert_leaves(&backend, vec![b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b2_hash]).await; backend.assert_contains_only(vec![ (3, a3_hash), @@ -1381,6 +1434,11 @@ fn finalize_erases_unviable_from_one_but_not_all_reverts() { ).await; assert_leaves(&backend, vec![]); + assert_finalized_leaves_query( + &mut virtual_overseer, + 1, + a1_hash, + ).await; backend.assert_contains_only(vec![ (3, a3_hash), @@ -1450,6 +1508,7 @@ fn 
finalize_triggers_viability_search() { ).await; assert_leaves(&backend, vec![c3_hash, b3_hash, a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![c3_hash, b3_hash, a3_hash]).await; backend.assert_contains_only(vec![ (3, a3_hash), From 9a16ffc614890accc072e3afcbfb6f55ec3c8af9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 04:49:28 +0100 Subject: [PATCH 76/79] remove TODO --- node/core/chain-selection/src/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs index 9e6f0dd7635f..945578a47e6e 100644 --- a/node/core/chain-selection/src/tests.rs +++ b/node/core/chain-selection/src/tests.rs @@ -516,7 +516,6 @@ fn assert_backend_contains_chains( } } -// TODO [now]: check `ChainApiMessage::Leaves`. fn assert_leaves( backend: &TestBackend, leaves: Vec, From 31f47de2e3a7ab55358949a4c54916c5bf2c511b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 16:48:22 +0100 Subject: [PATCH 77/79] avoid Ordering::is_ne so CI can build --- node/core/chain-selection/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index 727c3b4face1..a33c788c5cd4 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -103,7 +103,7 @@ impl PartialOrd for LeafEntry { let ord = self.weight.cmp(&other.weight) .then(self.block_number.cmp(&other.block_number)); - if ord.is_ne() { Some(ord) } else { None } + if !matches!(ord, std::cmp::Ordering::Equal) { Some(ord) } else { None } } } From eeb4c89a21d06d3021d7fcd067dca0f05d73a7d8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 20 Jun 2021 17:23:32 +0100 Subject: [PATCH 78/79] comment algorithmic complexity --- node/core/chain-selection/src/backend.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs index af2697f3a4df..160825b757e7 100644 --- a/node/core/chain-selection/src/backend.rs +++ b/node/core/chain-selection/src/backend.rs @@ -212,6 +212,13 @@ fn contains_ancestor( /// /// If the required block is unfinalized but not an ancestor of any viable leaf, /// this will return `None`. +// +// Note: this is O(N^2) in the depth of `required` and the number of leaves. +// We expect the number of unfinalized blocks to be small, as in, to not exceed +// single digits in practice, and exceedingly unlikely to surpass 1000. +// +// However, if we need to, we could implement some type of skip-list for +// fast ancestry checks. pub(super) fn find_best_leaf_containing( backend: &impl Backend, required: Hash, From 4132129f9b08d5d63f59db8347a7d9a2805b6923 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 21 Jun 2021 18:07:48 +0100 Subject: [PATCH 79/79] Update node/core/chain-selection/src/lib.rs Co-authored-by: Bernhard Schuster --- node/core/chain-selection/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index a33c788c5cd4..dddfc2590d33 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -40,7 +40,7 @@ mod tree; mod tests; const LOG_TARGET: &str = "parachain::chain-selection"; - +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. type Timestamp = u64; #[derive(Debug, Clone)]
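
Editor's note: the doc comment added in the final patch states that the stagnant-check `Timestamp` is based on the 1 Jan 1970 UNIX epoch and therefore stays meaningful across node restarts and OS reboots. The sketch below shows one conventional way such a timestamp can be derived from wall-clock time with the standard library; the patch series does not show the subsystem's actual clock source, so the `unix_timestamp_now` helper and the two-hour deadline are assumptions for illustration only.

```rust
use std::time::{SystemTime, UNIX_EPOCH};

type Timestamp = u64;

// Illustrative only: seconds since the UNIX epoch. Because this is
// wall-clock based rather than a monotonic or process-local counter,
// values persisted to the database remain comparable after restarts
// and reboots.
fn unix_timestamp_now() -> Timestamp {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before 1970; this is unexpected")
        .as_secs()
}

fn main() {
    let now = unix_timestamp_now();
    // A hypothetical stagnant deadline some period in the future.
    let stagnant_at = now + 2 * 60 * 60; // two hours, for illustration
    println!("now = {}, stagnant_at = {}", now, stagnant_at);
}
```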